Posted to commits@ambari.apache.org by al...@apache.org on 2017/06/28 00:23:59 UTC

[01/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-21348 7ad307c2c -> 1863c3b90


http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/metrics.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/metrics.json
new file mode 100755
index 0000000..604e545
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/metrics.json
@@ -0,0 +1,9370 @@
+{
+  "HBASE_REGIONSERVER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_bps":{
+              "metric":"read_bps",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_bps":{
+              "metric":"write_bps",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "regionserver.Server.mutationsWithoutWALSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "regionserver.Server.slowAppendCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "regionserver.Server.blockCacheCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_min": {
+              "metric": "regionserver.Server.Delete_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "regionserver.RegionServer.authorizationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "regionserver.RegionServer.QueueCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/writeRequestsCount": {
+              "metric": "regionserver.Server.writeRequestCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_min": {
+              "metric": "regionserver.Server.Get_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Mutate_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitCount": {
+              "metric": "regionserver.Server.blockCacheHitCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "regionserver.Server.slowPutCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/regionServerStartup_avg_time": {
+              "metric": "regionserver.Server.regionServerStartTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "regionserver.Server.blockCacheSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_median": {
+              "metric": "regionserver.Server.Mutate_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/readRequestsCount": {
+              "metric": "regionserver.Server.readRequestCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_min": {
+              "metric": "regionserver.Server.Mutate_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/storefileIndexSizeMB": {
+              "metric": "regionserver.Server.storeFileIndexSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_median": {
+              "metric": "regionserver.Server.Delete_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Get_num_ops": {
+              "metric": "regionserver.Server.Get_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/ScanNext_num_ops": {
+              "metric": "regionserver.Server.ScanNext_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Append_num_ops": {
+              "metric": "regionserver.Server.Append_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Delete_num_ops": {
+              "metric": "regionserver.Server.Delete_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Mutate_num_ops": {
+              "metric": "regionserver.Server.Mutate_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Increment_num_ops": {
+              "metric": "regionserver.Server.Increment_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Get_95th_percentile": {
+              "metric": "regionserver.Server.Get_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/ScanNext_95th_percentile": {
+              "metric": "regionserver.Server.ScanNext_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Mutate_95th_percentile": {
+              "metric": "regionserver.Server.Mutate_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Increment_95th_percentile": {
+              "metric": "regionserver.Server.Increment_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Append_95th_percentile": {
+              "metric": "regionserver.Server.Append_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Delete_95th_percentile": {
+              "metric": "regionserver.Server.Delete_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/percentFilesLocal": {
+              "metric": "regionserver.Server.percentFilesLocal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/updatesBlockedTime": {
+              "metric": "regionserver.Server.updatesBlockedTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/IPC/numOpenConnections": {
+              "metric": "regionserver.RegionServer.numOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/IPC/numActiveHandler": {
+              "metric": "regionserver.RegionServer.numActiveHandler",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/IPC/numCallsInGeneralQueue": {
+              "metric": "regionserver.RegionServer.numCallsInGeneralQueue",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_mean": {
+              "metric": "regionserver.Server.Mutate_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+           "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "regionserver.RegionServer.authenticationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitRatio": {
+              "metric": "regionserver.Server.blockCacheCountHitPercent",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitPercent": {
+              "metric": "regionserver.Server.blockCountHitPercent",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheEvictedCount": {
+              "metric": "regionserver.Server.blockCacheEvictionCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+              "metric": "regionserver.Server.Mutate_99th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_max": {
+              "metric": "regionserver.Server.Get_max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+              "metric": "regionserver.Server.Get_99th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+              "metric": "regionserver.Server.Delete_99th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "regionserver.Server.slowIncrementCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/compactionQueueSize": {
+              "metric": "regionserver.Server.compactionQueueLength",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushTime_num_ops": {
+              "metric": "regionserver.Server.FlushTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Get_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/stores": {
+              "metric": "regionserver.Server.storeCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "regionserver.RegionServer.authenticationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "regionserver.RegionServer.receivedBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_max": {
+              "metric": "regionserver.Server.Mutate_max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_mean": {
+              "metric": "regionserver.Server.Get_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/regions": {
+              "metric": "regionserver.Server.regionCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheFree": {
+              "metric": "regionserver.Server.blockCacheFreeSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "regionserver.Server.blockCacheMissCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushQueueSize": {
+              "metric": "regionserver.Server.flushQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "regionserver.RegionServer.QueueCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+              "metric": "regionserver.Server.Delete_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+              "metric": "regionserver.Server.staticIndexSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+              "metric": "regionserver.Server.mutationsWithoutWALCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_median": {
+              "metric": "regionserver.Server.Get_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_speed": {
+              "metric": "cpu_speed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_max": {
+              "metric": "regionserver.Server.Delete_max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "regionserver.RegionServer.sentBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/maxMemoryM": {
+              "metric": "jvm.metrics.maxMemoryM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushTime_avg_time": {
+              "metric": "regionserver.Server.FlushTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowGetCount": {
+              "metric": "regionserver.Server.slowGetCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/requests": {
+              "metric": "regionserver.Server.totalRequestCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/storefiles": {
+              "metric": "regionserver.Server.storeFileCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowDeleteCount": {
+              "metric": "regionserver.Server.slowDeleteCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/hlogFileCount": {
+              "metric": "regionserver.Server.hlogFileCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+              "metric": "regionserver.Server.Delete_95th_percentile",
+              "unit": "ms",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/memstoreSize": {
+              "metric": "regionserver.Server.memStoreSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Delete_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "regionserver.RegionServer.authorizationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+              "metric": "regionserver.Server.staticBloomSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/increment_avg_time": {
+              "metric": "regionserver.Server.Increment_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountConcurrentMarkSweep": {
+              "metric": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountParNew": {
+              "metric": "jvm.JvmMetrics.GcCountParNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisConcurrentMarkSweep": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisParNew": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisParNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/numCallsInPriorityQueue": {
+              "metric": "regionserver.RegionServer.numCallsInPriorityQueue",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/numCallsInReplicationQueue": {
+              "metric": "regionserver.RegionServer.numCallsInReplicationQueue",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/ProcessCallTime_75th_percentile": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/ProcessCallTime_95th_percentile": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/ProcessCallTime_99th_percentile": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/ProcessCallTime_max": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/ProcessCallTime_mean": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/ProcessCallTime_min": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/QueueCallTime_75th_percentile": {
+              "metric": "regionserver.RegionServer.QueueCallTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/QueueCallTime_95th_percentile": {
+              "metric": "regionserver.RegionServer.QueueCallTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/QueueCallTime_99th_percentile": {
+              "metric": "regionserver.RegionServer.QueueCallTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/QueueCallTime_max": {
+              "metric": "regionserver.RegionServer.QueueCallTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/QueueCallTime_mean": {
+              "metric": "regionserver.RegionServer.QueueCallTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/QueueCallTime_min": {
+              "metric": "regionserver.RegionServer.QueueCallTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/queueSize": {
+              "metric": "regionserver.RegionServer.queueSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_75th_percentile": {
+              "metric": "regionserver.RegionServer.TotalCallTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_95th_percentile": {
+              "metric": "regionserver.RegionServer.TotalCallTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_99th_percentile": {
+              "metric": "regionserver.RegionServer.TotalCallTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_max": {
+              "metric": "regionserver.RegionServer.TotalCallTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_mean": {
+              "metric": "regionserver.RegionServer.TotalCallTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_median": {
+              "metric": "regionserver.RegionServer.TotalCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_min": {
+              "metric": "regionserver.RegionServer.TotalCallTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_num_ops": {
+              "metric": "regionserver.RegionServer.TotalCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Replication/sink/ageOfLastAppliedOp": {
+              "metric": "regionserver.Replication.sink.ageOfLastAppliedOp",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Replication/sink/appliedBatches": {
+              "metric": "regionserver.Replication.sink.appliedBatches",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Replication/sink/appliedOps": {
+              "metric": "regionserver.Replication.sink.appliedOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Append_75th_percentile": {
+              "metric": "regionserver.Server.Append_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Append_99th_percentile": {
+              "metric": "regionserver.Server.Append_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Append_max": {
+              "metric": "regionserver.Server.Append_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Append_mean": {
+              "metric": "regionserver.Server.Append_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Append_median": {
+              "metric": "regionserver.Server.Append_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Append_min": {
+              "metric": "regionserver.Server.Append_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/blockCacheExpressHitPercent": {
+              "metric": "regionserver.Server.blockCacheExpressHitPercent",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/blockedRequestCount": {
+              "metric": "regionserver.Server.blockedRequestCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/checkMutateFailedCount": {
+              "metric": "regionserver.Server.checkMutateFailedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/checkMutatePassedCount": {
+              "metric": "regionserver.Server.checkMutatePassedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/compactedCellsCount": {
+              "metric": "regionserver.Server.compactedCellsCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/compactedCellsSize": {
+              "metric": "regionserver.Server.compactedCellsSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/flushedCellsCount": {
+              "metric": "regionserver.Server.flushedCellsCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/flushedCellsSize": {
+              "metric": "regionserver.Server.flushedCellsSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/FlushTime_75th_percentile": {
+              "metric": "regionserver.Server.FlushTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/FlushTime_95th_percentile": {
+              "metric": "regionserver.Server.FlushTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/FlushTime_99th_percentile": {
+              "metric": "regionserver.Server.FlushTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/FlushTime_max": {
+              "metric": "regionserver.Server.FlushTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/FlushTime_mean": {
+              "metric": "regionserver.Server.FlushTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/FlushTime_min": {
+              "metric": "regionserver.Server.FlushTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/hlogFileSize": {
+              "metric": "regionserver.Server.hlogFileSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Increment_75th_percentile": {
+              "metric": "regionserver.Server.Increment_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Increment_99th_percentile": {
+              "metric": "regionserver.Server.Increment_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Increment_max": {
+              "metric": "regionserver.Server.Increment_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Increment_mean": {
+              "metric": "regionserver.Server.Increment_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Increment_min": {
+              "metric": "regionserver.Server.Increment_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/majorCompactedCellsCount": {
+              "metric": "regionserver.Server.majorCompactedCellsCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/majorCompactedCellsSize": {
+              "metric": "regionserver.Server.majorCompactedCellsSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/percentFilesLocalSecondaryRegions": {
+              "metric": "regionserver.Server.percentFilesLocalSecondaryRegions",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_75th_percentile": {
+              "metric": "regionserver.Server.Replay_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_95th_percentile": {
+              "metric": "regionserver.Server.Replay_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_99th_percentile": {
+              "metric": "regionserver.Server.Replay_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_max": {
+              "metric": "regionserver.Server.Replay_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_mean": {
+              "metric": "regionserver.Server.Replay_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_median": {
+              "metric": "regionserver.Server.Replay_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_min": {
+              "metric": "regionserver.Server.Replay_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_num_ops": {
+              "metric": "regionserver.Server.Replay_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/ScanNext_75th_percentile": {
+              "metric": "regionserver.Server.ScanNext_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/ScanNext_99th_percentile": {
+              "metric": "regionserver.Server.ScanNext_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/ScanNext_max": {
+              "metric": "regionserver.Server.ScanNext_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/ScanNext_mean": {
+              "metric": "regionserver.Server.ScanNext_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/ScanNext_median": {
+              "metric": "regionserver.Server.ScanNext_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/ScanNext_min": {
+              "metric": "regionserver.Server.ScanNext_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/splitQueueLength": {
+              "metric": "regionserver.Server.splitQueueLength",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/splitRequestCount": {
+              "metric": "regionserver.Server.splitRequestCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/splitSuccessCount": {
+              "metric": "regionserver.Server.splitSuccessCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_75th_percentile": {
+              "metric": "regionserver.Server.SplitTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_95th_percentile": {
+              "metric": "regionserver.Server.SplitTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_99th_percentile": {
+              "metric": "regionserver.Server.SplitTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_max": {
+              "metric": "regionserver.Server.SplitTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_mean": {
+              "metric": "regionserver.Server.SplitTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_median": {
+              "metric": "regionserver.Server.SplitTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_min": {
+              "metric": "regionserver.Server.SplitTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_num_ops": {
+              "metric": "regionserver.Server.SplitTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/storeFileSize": {
+              "metric": "regionserver.Server.storeFileSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/appendCount": {
+              "metric": "regionserver.WAL.appendCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_75th_percentile": {
+              "metric": "regionserver.WAL.AppendSize_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_95th_percentile": {
+              "metric": "regionserver.WAL.AppendSize_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_99th_percentile": {
+              "metric": "regionserver.WAL.AppendSize_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_max": {
+              "metric": "regionserver.WAL.AppendSize_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_mean": {
+              "metric": "regionserver.WAL.AppendSize_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_median": {
+              "metric": "regionserver.WAL.AppendSize_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_min": {
+              "metric": "regionserver.WAL.AppendSize_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_num_ops": {
+              "metric": "regionserver.WAL.AppendSize_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_75th_percentile": {
+              "metric": "regionserver.WAL.AppendTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_95th_percentile": {
+              "metric": "regionserver.WAL.AppendTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_99th_percentile": {
+              "metric": "regionserver.WAL.AppendTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_max": {
+              "metric": "regionserver.WAL.AppendTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_mean": {
+              "metric": "regionserver.WAL.AppendTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_median": {
+              "metric": "regionserver.WAL.AppendTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_min": {
+              "metric": "regionserver.WAL.AppendTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_num_ops": {
+              "metric": "regionserver.WAL.AppendTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/lowReplicaRollRequest": {
+              "metric": "regionserver.WAL.lowReplicaRollRequest",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/rollRequest": {
+              "metric": "regionserver.WAL.rollRequest",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/slowAppendCount": {
+              "metric": "regionserver.WAL.slowAppendCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_75th_percentile": {
+              "metric": "regionserver.WAL.SyncTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_95th_percentile": {
+              "metric": "regionserver.WAL.SyncTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_99th_percentile": {
+              "metric": "regionserver.WAL.SyncTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_max": {
+              "metric": "regionserver.WAL.SyncTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_mean": {
+              "metric": "regionserver.WAL.SyncTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_median": {
+              "metric": "regionserver.WAL.SyncTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_min": {
+              "metric": "regionserver.WAL.SyncTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_num_ops": {
+              "metric": "regionserver.WAL.SyncTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/percentFilesLocal": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheFree": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/flushQueueSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/ScanNext_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.ScanNext_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Increment_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Increment_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Append_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Append_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/ScanNext_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.ScanNext_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Append_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Append_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Increment_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Increment_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/updatesBlockedTime": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.updatesBlockedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numActiveHandler": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numActiveHandler",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numCallsInGeneralQueue": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numCallsInGeneralQueue",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numOpenConnections": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheEvictedCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowGetCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/storefileIndexSizeMB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/requests": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/storefiles": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowDeleteCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/stores": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheHitCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_bps":{
+              "metric":"read_bps",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_bps":{
+              "metric":"write_bps",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "regionserver.Server.mutationsWithoutWALSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+              "metric": "regionserver.Server.Delete_99th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "regionserver.Server.slowAppendCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "regionserver.Server.slowIncrementCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+              "metric": "regionserver.Server.Mutate_95th_percentile",
+              "unit": "ms",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/compactionQueueSize": {
+              "metric": "regionserver.Server.compactionQueueLength",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushTime_num_ops": {
+              "metric": "regionserver.Server.FlushTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "regionserver.Server.blockCacheCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Get_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/stores": {
+              "metric": "regionserver.Server.storeCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "regionserver.RegionServer.authenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_min": {
+              "metric": "regionserver.Server.Delete_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "regionserver.RegionServer.authorizationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "regionserver.RegionServer.QueueCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+              "metric": "regionserver.Server.Get_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "regionserver.RegionServer.receivedBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "regionserver.RegionServer.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_max": {
+              "metric": "regionserver.Server.Mutate_max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+              "metric": "regionserver.Server.Delete_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/writeRequestsCount": {
+              "metric": "regionserver.Server.writeRequestCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_min": {
+              "metric": "regionserver.Server.Get_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_mean": {
+              "metric": "regionserver.Server.Get_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Mutate_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/regions": {
+              "metric": "regionserver.Server.regionCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitCount": {
+              "metric": "regionserver.Server.blockCacheHitCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "regionserver.Server.slowPutCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheFree": {
+              "metric": "regionserver.Server.blockCacheFreeSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "regionserver.Server.blockCacheMissCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushQueueSize": {
+              "metric": "regionserver.Server.flushQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "regionserver.Server.blockCacheSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+              "metric": "regionserver.Server.Mutate_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "regionserver.RegionServer.QueueCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_median": {
+              "metric": "regionserver.Server.Mutate_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+              "metric": "regionserver.Server.Delete_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/readRequestsCount": {
+              "metric": "regionserver.Server.readRequestCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_min": {
+              "metric": "regionserver.Server.Mutate_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/storefileIndexSizeMB": {
+              "metric": "regionserver.Server.storeFileIndexSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_median": {
+              "metric": "regionserver.Server.Delete_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+              "metric": "regionserver.Server.staticIndexSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_mean": {
+              "metric": "regionserver.Server.Mutate_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+              "metric": "regionserver.Server.mutationsWithoutWALCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_median": {
+              "metric": "regionserver.Server.Get_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_max": {
+              "metric": "regionserver.Server.Delete_max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_speed": {
+              "metric": "cpu_speed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "regionserver.RegionServer.authenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/percentFilesLocal": {
+              "metric": "regionserver.Server.percentFilesLocal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitRatio": {
+              "metric": "regionserver.Server.blockCacheCountHitPercent",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "regionserver.RegionServer.SentBytes",
+              "pointInTime": true,
+              "tempora

<TRUNCATED>
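
A note on the schema in the (truncated) metrics.json above: each key under "default" is the Ambari-facing metric path, the "metric" field names the source metric (an AMS/Ganglia timeline name when "type" is "ganglia", a JMX MBean attribute such as Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount when "type" is "jmx"), and "pointInTime"/"temporal" flag whether the current value and the time series can be queried. Below is a minimal sketch of how such a file could be indexed into a lookup table; the function name and tuple layout are illustrative only, not Ambari's actual provider code:

import json

def index_metric_definitions(path):
    """Build {(component, scope, metric_path): (source_type, source_metric,
    point_in_time, temporal)} from a stack metrics.json like the one above."""
    with open(path) as f:
        definitions = json.load(f)
    index = {}
    for component, scopes in definitions.items():       # e.g. "HBASE_REGIONSERVER"
        for scope in ("Component", "HostComponent"):
            for provider in scopes.get(scope, []):
                source_type = provider["type"]           # "ganglia" or "jmx"
                for metric_path, spec in provider["metrics"]["default"].items():
                    index[(component, scope, metric_path)] = (
                        source_type,
                        spec["metric"],
                        spec.get("pointInTime", False),
                        spec.get("temporal", False),
                    )
    return index

# Example lookup (values taken from the definitions above):
# index[("HBASE_REGIONSERVER", "Component", "metrics/hbase/regionserver/requests")]
# -> ("jmx", "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
#     True, False)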

[40/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/widgets.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/widgets.json
new file mode 100755
index 0000000..a0f5ba9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/widgets.json
@@ -0,0 +1,510 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_hbase_dashboard",
+      "display_name": "Standard HBase Dashboard",
+      "section_name": "HBASE_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Reads and Writes",
+          "description": "Count of read and write requests on all regions in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.Get_num_ops._sum",
+              "metric_path": "metrics/hbase/regionserver/Server/Get_num_ops._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.ScanNext_num_ops._sum",
+              "metric_path": "metrics/hbase/regionserver/Server/ScanNext_num_ops._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Append_num_ops._sum",
+              "metric_path": "metrics/hbase/regionserver/Server/Append_num_ops._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Delete_num_ops._sum",
+              "metric_path": "metrics/hbase/regionserver/Server/Delete_num_ops._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Increment_num_ops._sum",
+              "metric_path": "metrics/hbase/regionserver/Server/Increment_num_ops._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Mutate_num_ops._sum",
+              "metric_path": "metrics/hbase/regionserver/Server/Mutate_num_ops._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Read Requests",
+              "value": "${regionserver.Server.Get_num_ops._sum + regionserver.Server.ScanNext_num_ops._sum}"
+            },
+            {
+              "name": "Write Requests",
+              "value": "${regionserver.Server.Append_num_ops._sum + regionserver.Server.Delete_num_ops._sum + regionserver.Server.Increment_num_ops._sum + regionserver.Server.Mutate_num_ops._sum}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Read Latency",
+          "description": "maximum of 95% read latency.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.Get_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Get_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.ScanNext_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/ScanNext_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Cluster wide maximum of 95% Get Latency",
+              "value": "${regionserver.Server.Get_95th_percentile._max}"
+            },
+            {
+              "name": "Cluster wide maximum of 95% ScanNext Latency",
+              "value": "${regionserver.Server.ScanNext_95th_percentile._max}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Write Latency",
+          "description": "maximum of 95% write latency.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.Mutate_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Mutate_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Increment_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Increment_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Append_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Append_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.Server.Delete_95th_percentile._max",
+              "metric_path": "metrics/hbase/regionserver/Server/Delete_95th_percentile._max",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Cluster wide maximum of 95% Mutate Latency",
+              "value": "${regionserver.Server.Mutate_95th_percentile._max}"
+            },
+            {
+              "name": "Cluster wide maximum of 95% Increment Latency",
+              "value": "${regionserver.Server.Increment_95th_percentile._max}"
+            },
+            {
+              "name": "Cluster wide maximum of 95% Append Latency",
+              "value": "${regionserver.Server.Append_95th_percentile._max}"
+            },
+            {
+              "name": "Cluster wide maximum of 95% Delete Latency",
+              "value": "${regionserver.Server.Delete_95th_percentile._max}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Open Connections",
+          "description": "Count of open connections across all RegionServer. This is indicative of RegionServer load in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.RegionServer.numOpenConnections._sum",
+              "metric_path": "metrics/hbase/ipc/IPC/numOpenConnections._sum",
+              "category": "",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Open Connections",
+              "value": "${regionserver.RegionServer.numOpenConnections._sum}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Request Handlers",
+          "description": "Count of Active handlers vs count of calls waiting in the general queue.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.RegionServer.numActiveHandler._sum",
+              "metric_path": "metrics/hbase/ipc/IPC/numActiveHandler._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "regionserver.RegionServer.numCallsInGeneralQueue._sum",
+              "metric_path": "metrics/hbase/ipc/IPC/numCallsInGeneralQueue._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Active Handlers",
+              "value": "${regionserver.RegionServer.numActiveHandler._sum}"
+            },
+            {
+              "name": "Calls in General Queue",
+              "value": "${regionserver.RegionServer.numCallsInGeneralQueue._sum}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Files Local",
+          "description": "Average percentage of local files to RegionServer in the cluster.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.percentFilesLocal",
+              "metric_path": "metrics/hbase/regionserver/Server/percentFilesLocal",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Files Local",
+              "value": "${regionserver.Server.percentFilesLocal}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "error_threshold":"25",
+            "warning_threshold": "75"
+          }
+        },
+        {
+          "widget_name": "Blocked Updates",
+          "description": "Number of milliseconds updates have been blocked so the memstore can be flushed.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "regionserver.Server.updatesBlockedTime._sum",
+              "metric_path": "metrics/hbase/regionserver/Server/updatesBlockedTime._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Updates Blocked Time",
+              "value": "${regionserver.Server.updatesBlockedTime._sum}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster CPU",
+          "description": "Percentage of CPU utilized across all RegionServer hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system._sum",
+              "metric_path": "metrics/cpu/cpu_system._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "cpu_user._sum",
+              "metric_path": "metrics/cpu/cpu_user._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "cpu_nice._sum",
+              "metric_path": "metrics/cpu/cpu_nice._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "cpu_idle._sum",
+              "metric_path": "metrics/cpu/cpu_idle._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "cpu_wio._sum",
+              "metric_path": "metrics/cpu/cpu_wio._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        },
+        {
+          "widget_name": "Cluster Network",
+          "description": "Average of Network IO utilized across all RegionServer hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "pkts_in._avg",
+              "metric_path": "metrics/network/pkts_in._avg",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "pkts_out._avg",
+              "metric_path": "metrics/network/pkts_out._avg",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Packets In",
+              "value": "${pkts_in._avg}"
+            },
+            {
+              "name": "Packets Out",
+              "value": "${pkts_out._avg}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Disk",
+          "description": "Sum of disk throughput for all RegionServer hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "read_bps._sum",
+              "metric_path": "metrics/disk/read_bps._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            },
+            {
+              "name": "write_bps._sum",
+              "metric_path": "metrics/disk/write_bps._sum",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Read throughput",
+              "value": "${read_bps._sum/1048576}"
+            },
+            {
+              "name": "Write throughput",
+              "value": "${write_bps._sum/1048576}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Mbps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_hbase_heatmap",
+      "display_name": "HBase Heatmaps",
+      "section_name": "HBASE_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "HBase Compaction Queue Size",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
+              "metric_path": "metrics/hbase/regionserver/compactionQueueSize",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Compaction Queue Size",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength} "
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "10"
+          }
+        },
+        {
+          "widget_name": "HBase Memstore Sizes",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
+              "metric_path": "metrics/hbase/regionserver/memstoreSize",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Memstore Sizes",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize}"
+            }
+          ],
+          "properties": {
+            "display_unit": "B",
+            "max_limit": "104857600"
+          }
+        },
+        {
+          "widget_name": "HBase Read Request Count",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
+              "metric_path": "metrics/hbase/regionserver/readRequestsCount",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Read Request Count",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount}"
+            }
+          ],
+          "properties": {
+            "max_limit": "200"
+          }
+        },
+        {
+          "widget_name": "HBase Write Request Count",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
+              "metric_path": "metrics/hbase/regionserver/writeRequestsCount",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Write Request Count",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount}"
+            }
+          ],
+          "properties": {
+            "max_limit": "200"
+          }
+        },
+        {
+          "widget_name": "HBase Regions",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
+              "metric_path": "metrics/hbase/regionserver/regions",
+              "service_name": "HBASE",
+              "component_name": "HBASE_REGIONSERVER"
+            }
+          ],
+          "values": [
+            {
+              "name": "HBase Regions",
+              "value": "${Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount}"
+            }
+          ],
+          "properties": {
+            "max_limit": "10"
+          }
+        }
+      ]
+    }
+  ]
+}
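
A note on the "values" expressions in the widgets.json above: each ${...} string is arithmetic over the declared metrics after cluster-level aggregation (the ._sum/._avg/._max suffixes), evaluated by Ambari when the widget is rendered. A quick illustrative check of the Cluster CPU and Cluster Disk formulas with sample aggregate numbers; the inputs are made up, only the arithmetic mirrors the widget definitions:

# Sample aggregates; Ambari substitutes real cluster-wide sums here.
cpu_system, cpu_user, cpu_nice = 120.0, 340.0, 5.0
cpu_idle, cpu_wio = 510.0, 25.0

# "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum) /
#     (cpu_system._sum + ... + cpu_idle._sum + cpu_wio._sum)) * 100}"
busy = cpu_system + cpu_user + cpu_nice
total = busy + cpu_idle + cpu_wio
print("CPU utilization: %.1f%%" % (busy / total * 100))    # -> 46.5%

# Cluster Disk scales the byte/s sums by 1048576, as in "${read_bps._sum/1048576}":
read_bps_sum = 52428800.0           # sample: ~50 MiB/s summed across hosts
print("Read throughput: %.1f" % (read_bps_sum / 1048576))  # -> 50.0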

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/alerts.json
new file mode 100755
index 0000000..6888a18
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/alerts.json
@@ -0,0 +1,657 @@
+{
+  "HDFS":{
+    "service": [
+      {
+        "name": "datanode_process_percent",
+        "label": "Percent DataNodes Available",
+        "description": "This alert is triggered if the number of down DataNodes in the cluster is greater than the configured critical threshold. It aggregates the results of DataNode process checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "datanode_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.1
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.3
+            }
+          }
+        }
+      },
+      {
+        "name": "datanode_storage_percent",
+        "label": "Percent DataNodes With Available Space",
+        "description": "This service-level alert is triggered if the storage on a certain percentage of DataNodes exceeds either the warning or critical threshold values.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "datanode_storage",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.1
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.3
+            }
+          }
+        }
+      },
+      {
+        "name": "journalnode_process_percent",
+        "label": "Percent JournalNodes Available",
+        "description": "This alert is triggered if the number of down JournalNodes in the cluster is greater than the configured critical threshold. It aggregates the results of JournalNode process checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "journalnode_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.33
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.50
+            }
+          }
+        }
+      }
+    ],
+    "NAMENODE": [
+      {
+        "name": "namenode_webui",
+        "label": "NameNode Web UI",
+        "description": "This host-level alert is triggered if the NameNode Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
+            "connection_timeout": 5.0,
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "namenode_cpu",
+        "label": "NameNode Host CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the NameNode exceeds certain warning and critical thresholds. It checks the NameNode JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
+      {
+        "name": "namenode_hdfs_blocks_health",
+        "label": "NameNode Blocks Health",
+        "description": "This service-level alert is triggered if the number of corrupt or missing blocks exceeds the configured critical threshold. The threshold values are in blocks.",
+        "interval": 2,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]"
+            },
+            "warning": {
+              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]",
+              "value": 1
+            },
+            "critical": {
+              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]",
+              "value": 1
+            },
+            "units" : "Blocks"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=FSNamesystem/MissingBlocks",
+              "Hadoop:service=NameNode,name=FSNamesystem/BlocksTotal"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "namenode_hdfs_capacity_utilization",
+        "label": "HDFS Capacity Utilization",
+        "description": "This service-level alert is triggered if the HDFS capacity utilization exceeds the configured warning and critical thresholds. It checks the NameNode JMX Servlet for the CapacityUsed and CapacityRemaining properties. The threshold values are in percent.",
+        "interval": 2,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "Capacity Used:[{2:.0f}%, {0}], Capacity Remaining:[{1}]"
+            },
+            "warning": {
+              "text": "Capacity Used:[{2:.0f}%, {0}], Capacity Remaining:[{1}]",
+              "value": 80
+            },
+            "critical": {
+              "text": "Capacity Used:[{2:.0f}%, {0}], Capacity Remaining:[{1}]",
+              "value": 90
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=FSNamesystemState/CapacityUsed",
+              "Hadoop:service=NameNode,name=FSNamesystemState/CapacityRemaining"
+            ],
+            "value": "{0}/({0} + {1}) * 100.0"
+          }
+        }
+      },
+      {
+        "name": "namenode_rpc_latency",
+        "label": "NameNode RPC Latency",
+        "description": "This host-level alert is triggered if the NameNode RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for NameNode operations. The threshold values are in milliseconds.",
+        "interval": 2,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
+            },
+            "warning": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 3000
+            },
+            "critical": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 5000
+            },
+            "units" : "ms"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
+              "Hadoop:service=NameNode,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "namenode_directory_status",
+        "label": "NameNode Directory Status",
+        "description": "This host-level alert is triggered if the NameNode NameDirStatuses metric (name=NameNodeInfo/NameDirStatuses) reports a failed directory. The threshold values are in the number of directories that are not healthy.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "Directories are healthy"
+            },
+            "warning": {
+              "text": "Failed directory count: {1}",
+              "value": 1
+            },
+            "critical": {
+              "text": "Failed directory count: {1}",
+              "value": 1
+            },
+            "units" : "Dirs"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=NameNodeInfo/NameDirStatuses"
+            ],
+            "value": "calculate(args)\ndef calculate(args):\n  import json\n  json_statuses = json.loads({0})\n  return len(json_statuses['failed']) if 'failed' in json_statuses else 0"
+          }
+        }
+      },
+      {
+        "name": "datanode_health_summary",
+        "label": "DataNode Health Summary",
+        "description": "This service-level alert is triggered if there are unhealthy DataNodes",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "high_availability": {
+              "nameservice": "{{hdfs-site/dfs.nameservices}}",
+              "alias_key" : "{{hdfs-site/dfs.ha.namenodes.{{ha-nameservice}}}}",
+              "http_pattern" : "{{hdfs-site/dfs.namenode.http-address.{{ha-nameservice}}.{{alias}}}}",
+              "https_pattern" : "{{hdfs-site/dfs.namenode.https-address.{{ha-nameservice}}.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "All {2} DataNode(s) are healthy"
+            },
+            "warning": {
+              "text": "DataNode Health: [Live={2}, Stale={1}, Dead={0}]",
+              "value": 1
+            },
+            "critical": {
+              "text": "DataNode Health: [Live={2}, Stale={1}, Dead={0}]",
+              "value": 1
+            },
+            "units" : "DNs"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=FSNamesystemState/NumDeadDataNodes",
+              "Hadoop:service=NameNode,name=FSNamesystemState/NumStaleDataNodes",
+              "Hadoop:service=NameNode,name=FSNamesystemState/NumLiveDataNodes"
+            ],
+            "value": "{0} + {1}"
+          }
+        }
+      },
+      {
+        "name": "namenode_last_checkpoint",
+        "label": "NameNode Last Checkpoint",
+        "description": "This service-level alert will trigger if the last time that the NameNode performed a checkpoint was too long ago. It will also trigger if the number of uncommitted transactions is beyond a certain threshold.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.7.1.4.1/package/alerts/alert_checkpoint_time.py",
+          "parameters": [
+            {
+              "name": "connection.timeout",
+              "display_name": "Connection Timeout",
+              "value": 5.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before this alert is considered to be CRITICAL",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            },
+            {
+              "name": "checkpoint.time.warning.threshold",
+              "display_name": "Checkpoint Warning",
+              "value": 2.0,
+              "type": "PERCENT",
+              "description": "The percentage of the last checkpoint time greater than the interval in order to trigger a warning alert.",
+              "units": "%",
+              "threshold": "WARNING"
+            },
+            {
+              "name": "checkpoint.time.critical.threshold",
+              "display_name": "Checkpoint Critical",
+              "value": 2.0,
+              "type": "PERCENT",
+              "description": "The percentage of the last checkpoint time greater than the interval in order to trigger a critical alert.",
+              "units": "%",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      },
+      {
+        "name": "namenode_ha_health",
+        "label": "NameNode High Availability Health",
+        "description": "This service-level alert is triggered if either the Active NameNode or Standby NameNode are not running.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "ignore_host": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDFS/2.7.1.4.1/package/alerts/alert_ha_namenode_health.py",
+          "parameters": [
+            {
+              "name": "connection.timeout",
+              "display_name": "Connection Timeout",
+              "value": 5.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before this alert is considered to be CRITICAL",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
+    ],
+    "SECONDARY_NAMENODE": [
+      {
+        "name": "secondary_namenode_process",
+        "label": "Secondary NameNode Process",
+        "description": "This host-level alert is triggered if the Secondary NameNode process cannot be confirmed to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{hdfs-site/dfs.namenode.secondary.http-address}}",
+          "default_port": 50071,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "NFS_GATEWAY": [
+      {
+        "name": "nfsgateway_process",
+        "label": "NFS Gateway Process",
+        "description": "This host-level alert is triggered if the NFS Gateway process cannot be confirmed to be up and listening on the network.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{hdfs-site/nfs.server.port}}",
+          "default_port": 2049,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "JOURNALNODE": [
+      {
+        "name": "journalnode_process",
+        "label": "JournalNode Process",
+        "description": "This host-level alert is triggered if the JournalNode process cannot be confirmed to be up and listening on the network.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{hdfs-site/dfs.journalnode.http-address}}",
+          "default_port": 8480,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "DATANODE": [
+      {
+        "name": "datanode_process",
+        "label": "DataNode Process",
+        "description": "This host-level alert is triggered if the individual DataNode processes cannot be established to be up and listening on the network.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{hdfs-site/dfs.datanode.address}}",
+          "default_port": 50010,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "datanode_webui",
+        "label": "DataNode Web UI",
+        "description": "This host-level alert is triggered if the DataNode Web UI is unreachable.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{hdfs-site/dfs.datanode.http.address}}",
+            "https": "{{hdfs-site/dfs.datanode.https.address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{hdfs-site/dfs.web.authentication.kerberos.principal}}"
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "datanode_storage",
+        "label": "DataNode Storage",
+        "description": "This host-level alert is triggered if storage capacity if full on the DataNode. It checks the DataNode JMX Servlet for the Capacity and Remaining properties. The threshold values are in percent.",
+        "interval": 2,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.datanode.http.address}}",
+            "https": "{{hdfs-site/dfs.datanode.https.address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY"
+          },
+          "reporting": {
+            "ok": {
+              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:.0f}% Used, {1}]"
+            },
+            "warning": {
+              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:.0f}% Used, {1}]",
+              "value": 80
+            },
+            "critical": {
+              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:.0f}% Used, {1}]",
+              "value": 90
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=DataNode,name=FSDatasetState-*/Remaining",
+              "Hadoop:service=DataNode,name=FSDatasetState-*/Capacity"
+            ],
+            "value": "({1} - {0})/{1} * 100.0"
+          }
+        }
+      }
+    ],
+    "ZKFC": [
+      {
+        "name": "hdfs_zookeeper_failover_controller_process",
+        "label": "ZooKeeper Failover Controller Process",
+        "description": "This host-level alert is triggered if the ZooKeeper Failover Controller process cannot be confirmed to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{core-site/ha.zookeeper.quorum}}",
+          "default_port": 2181,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}

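A note on how the definitions above are evaluated: for AGGREGATE alerts, the warning/critical "value" entries are the fraction of affected hosts over total hosts; for METRIC alerts, the JMX attributes named in "property_list" are substituted positionally into the "value" expression ({0} is the first attribute, {1} the second). The Python sketch below illustrates both semantics; evaluate_aggregate is an illustrative helper, not Ambari's API, and the eval-based substitution only approximates the server's expression handling.

    # Illustrative sketch of the alert threshold semantics above; not Ambari code.
    def evaluate_aggregate(total, affected, warning=0.1, critical=0.3):
        """Map an affected/total host ratio to an alert state."""
        if total == 0:
            return "UNKNOWN"
        ratio = float(affected) / total
        if ratio >= critical:
            return "CRITICAL"
        if ratio >= warning:
            return "WARNING"
        return "OK"

    print(evaluate_aggregate(total=10, affected=2))    # WARNING (0.2 >= 0.1)

    # METRIC example: namenode_hdfs_capacity_utilization substitutes
    # CapacityUsed into {0} and CapacityRemaining into {1}.
    used, remaining = 8.0e12, 2.0e12                   # sample byte counts
    expr = "{0}/({0} + {1}) * 100.0"
    print(eval(expr.format(used, remaining)))          # 80.0 -> hits the warning value of 80
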
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/core-site.xml
new file mode 100755
index 0000000..f833896
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,189 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>131072</value>
+    <description>The size of buffer for use in sequence files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+  </property>
+
+  <property>
+    <name>io.serializations</name>
+    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+    <description>A comma-delimited list of serialization classes that can be used for obtaining serializers and deserializers.
+    </description>
+  </property>
+
+  <property>
+    <name>io.compression.codecs</name>
+    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
+    <description>A list of the compression codec classes that can be used
+                 for compression/decompression.</description>
+  </property>
+
+<!-- file system properties -->
+
+  <property>
+    <name>fs.defaultFS</name>
+    <!-- cluster variant -->
+    <value>hdfs://localhost:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes between trash checkpoints.
+  If zero, the trash feature is disabled.
+  </description>
+  </property>
+
+  <!-- ipc properties: copied from kryptonite configuration -->
+  <property>
+    <name>ipc.client.idlethreshold</name>
+    <value>8000</value>
+    <description>Defines the threshold number of connections after which
+               connections will be inspected for idleness.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>30000</value>
+    <description>The maximum time after which a client will bring down the
+               connection to the server.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>50</value>
+    <description>Defines the maximum number of retries for IPC connections.</description>
+  </property>
+
+  <property>
+    <name>ipc.server.tcpnodelay</name>
+    <value>true</value>
+    <description>Turn on/off Nagle's algorithm for the TCP socket
+      connection on the server. Setting to true disables the algorithm
+      and may decrease latency at the cost of more/smaller packets.
+    </description>
+  </property>
+
+  <!-- Web Interface Configuration -->
+  <property>
+    <name>mapreduce.jobtracker.webinterface.trusted</name>
+    <value>false</value>
+    <description> If set to true, the web interfaces of JT and NN may contain
+                actions, such as kill job, delete file, etc., that should
+                not be exposed to the public. Enable this option if the interfaces
+                are only reachable by those who have the right authorization.
+  </description>
+  </property>
+
+ <property>
+   <name>hadoop.security.authentication</name>
+   <value>simple</value>
+   <description>
+   Set the authentication for the cluster. Valid values are: simple or
+   kerberos.
+   </description>
+ </property>
+<property>
+  <name>hadoop.security.authorization</name>
+  <value>false</value>
+  <description>
+     Enable authorization for different protocols.
+  </description>
+</property>
+
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>
+        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
+        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
+        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
+        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
+        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
+        DEFAULT
+    </value>
+<description>The mapping from kerberos principal names to local OS user names.
+  The default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
+  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both become "omalley" if your default domain is APACHE.ORG.
+The translations rules have 3 sections:
+      base     filter    substitution
+The base consists of a number, which is the number of components in the principal name (excluding the realm), and a pattern for building the name from the sections of the principal name. The pattern uses $0 to mean the realm, $1 to mean the first component, and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+    </description>
+    <value-attributes>
+      <type>multiLine</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>net.topology.script.file.name</name>
+    <value>/etc/hadoop/conf/topology_script.py</value>
+    <description>
+      Location of topology script used by Hadoop to determine the rack location of nodes.
+    </description>
+  </property>
+</configuration>

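To make the RULE syntax documented above concrete, the sketch below applies one of the configured rules, RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/, to a principal by hand. It assumes the base/filter/substitution semantics described in the property's description; Hadoop's real implementation is the KerberosName class, and apply_rule is purely illustrative.

    import re

    def apply_rule(principal, realm="APACHE.ORG"):
        """Hand-rolled sketch of RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/."""
        name, _, princ_realm = principal.partition("@")
        parts = name.split("/")
        if len(parts) != 2 or princ_realm != realm:
            return None                                 # base [2:...] does not apply
        candidate = "%s@%s" % (parts[0], princ_realm)   # build $1@$0
        if re.match(r"[rn]m@.*", candidate):            # the filter must match
            return re.sub(r".*", "yarn", candidate, count=1)  # apply s/.*/yarn/
        return None

    print(apply_rule("rm/host1.example.com@APACHE.ORG"))  # yarn
    print(apply_rule("omalley@APACHE.ORG"))               # None -> falls through to DEFAULT
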
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml
new file mode 100755
index 0000000..ba575b0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,308 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <display-name>Hadoop Log Dir Prefix</display-name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <display-name>Hadoop PID Dir Prefix</display-name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hadoop_root_logger</name>
+    <value>INFO,RFA</value>
+    <description>Hadoop Root Logger</description>
+    <display-name>Hadoop Root Logger</display-name>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <display-name>Hadoop maximum Java heap size</display-name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+    <display-name>NameNode Java heap size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>268435456</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>NameNode new generation size</description>
+    <display-name>NameNode new generation size</display-name>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>namenode_heapsize</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>16384</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+    <display-name>NameNode maximum new generation size</display-name>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>namenode_heapsize</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>16384</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+    <display-name>NameNode permanent generation size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2096</maximum>
+      <unit>MB</unit>
+      <increment-step>128</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+    <display-name>NameNode maximum permanent generation size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2096</maximum>
+      <unit>MB</unit>
+      <increment-step>128</increment-step>
+       <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+    <display-name>DataNode maximum Java heap size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>268435456</maximum>
+      <unit>MB</unit>
+      <increment-step>128</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <property-type>GROUP</property-type>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <property-type>USER</property-type>
+    <description>User to run HDFS as</description>
+  </property>
+  <property>
+    <name>dfs.datanode.data.dir.mount.file</name>
+    <value>/etc/hadoop/conf/dfs_data_dir_mount.hist</value>
+    <description>
+      File path that contains the last known mount point for each data dir. This
+      file is used to avoid creating a DFS data dir on the root drive (and filling it up)
+      if a path was previously mounted on a drive.
+    </description>
+    <display-name>File that stores mount point for each data dir</display-name>
+    <value-attributes>
+      <type>directory</type>
+      <visible>true</visible>
+    </value-attributes>
+  </property>
+
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+#if [ -d "/usr/lib/tez" ]; then
+#  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+#fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/iop/current/hadoop-client/lib/native
+
+#Hadoop logging options. Modify and uncomment to change logging level
+#export HADOOP_ROOT_LOGGER={{hadoop_root_logger}}
+    </value>
+  </property>
+
+</configuration>

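The "content" property above is a Jinja2 template: Ambari fills the {{...}} placeholders from configuration (for example java_home, or the hadoop_heapsize property defined earlier in this file) before writing hadoop-env.sh. Below is a minimal sketch of that rendering step on a two-line excerpt, with illustrative values; the real rendering happens inside Ambari's agent-side templating.

    from jinja2 import Template  # the same templating syntax the {{...}} markers use

    excerpt = Template(
        "export JAVA_HOME={{java_home}}\n"
        'export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"\n')
    print(excerpt.render(java_home="/usr/jdk64/jdk1.8.0", hadoop_heapsize="1024"))
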
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-policy.xml
new file mode 100755
index 0000000..41bde16
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hadoop-policy.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.tracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
+    communicate with the jobtracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for JobSubmissionProtocol, used by job clients to
+    communicate with the jobtracker for job submission, querying job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    user mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    example, "alice,bob users,wheel". A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+<property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+
+</configuration>

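Every ACL above shares one format: a comma-separated user list and a comma-separated group list, separated by a single blank, with "*" meaning all users. The sketch below illustrates that format; parse_acl is an illustrative helper, while Hadoop itself parses these strings in its AccessControlList class.

    def parse_acl(acl):
        """Split an ACL string into (users, groups); None means everyone."""
        if acl.strip() == "*":
            return None
        users, _, groups = acl.partition(" ")
        return ([u for u in users.split(",") if u],
                [g for g in groups.split(",") if g])

    print(parse_acl("alice,bob users,wheel"))  # (['alice', 'bob'], ['users', 'wheel'])
    print(parse_acl("hadoop"))                 # (['hadoop'], [])
    print(parse_acl("*"))                      # None
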
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100755
index 0000000..08822eb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,201 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+# To change daemon root logger use hadoop_root_logger in hadoop-env
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and number of backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+    </value>
+  </property>
+
+</configuration>


[30/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.14.0.mysql.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.14.0.mysql.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.14.0.mysql.sql
new file mode 100755
index 0000000..e0086ab
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.14.0.mysql.sql
@@ -0,0 +1,889 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  KEY `TBLS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE;
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS`
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNCS
+--
+CREATE TABLE IF NOT EXISTS `FUNCS` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `CREATE_TIME` INT(11) NOT NULL,
+  `DB_ID` BIGINT(20),
+  `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `FUNC_TYPE` INT(11) NOT NULL,
+  `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
+  PRIMARY KEY (`FUNC_ID`),
+  UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
+  KEY `FUNCS_N49` (`DB_ID`),
+  CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNC_RU
+--
+CREATE TABLE IF NOT EXISTS `FUNC_RU` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `RESOURCE_TYPE` INT(11) NOT NULL,
+  `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `INTEGER_IDX` INT(11) NOT NULL,
+  PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
+  CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+
+-- ----------------------------
+-- Transaction and Lock Tables
+-- These tables are not part of the JDO package, so if you regenerate this file you must manually add this section back.
+-- ----------------------------
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint,
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767),
+  FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767),
+  HL_LOCK_STATE char(1) NOT NULL,
+  HL_LOCK_TYPE char(1) NOT NULL,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
+  KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.14.0', 'Hive release version 0.14.0');
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31
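
[Editor's note -- not part of the commit] The transaction and lock tables
above feed the metastore's ACID layer rather than DataNucleus, which is why
the section header warns that a regenerated schema file needs them added
back by hand. Below is a minimal sketch of how a new transaction id would
be handed out against this MySQL schema; the literal id, timestamps, user
and host are illustrative assumptions, not taken from the source.

-- Serialize concurrent callers on the single row seeded above, then
-- reserve an id and record the open transaction ('o' is assumed here
-- to mean "open"). All three statements run in one DB transaction.
START TRANSACTION;
SELECT NTXN_NEXT FROM NEXT_TXN_ID FOR UPDATE;        -- e.g. returns 1
UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 1;    -- next caller gets 2
INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, TXN_HOST)
VALUES (1, 'o', UNIX_TIMESTAMP() * 1000, UNIX_TIMESTAMP() * 1000, 'hive', 'worker1.example.com');
COMMIT;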

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.14.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.14.0.oracle.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.14.0.oracle.sql
new file mode 100755
index 0000000..1450f9b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.14.0.oracle.sql
@@ -0,0 +1,833 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
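
[Editor's note -- not part of the commit] SEQUENCE_TABLE backs DataNucleus'
table-based value generator: ids for persistent classes are allocated by
reading and advancing NEXT_VAL under a row lock. A sketch of one allocation
round follows; the class name and the block size of 5 are illustrative
assumptions, not taken from the source.

-- Lock the row for one persistent class, then advance the counter so
-- the caller owns the id block [NEXT_VAL, NEXT_VAL + 5).
SELECT NEXT_VAL FROM SEQUENCE_TABLE
 WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MTable' FOR UPDATE;
UPDATE SEQUENCE_TABLE SET NEXT_VAL = NEXT_VAL + 5
 WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MTable';
COMMIT;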
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
+    TYPE_NAME VARCHAR2(4000) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    OWNER_TYPE VARCHAR2(10) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(128) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+
+CREATE TABLE FUNCS (
+  FUNC_ID NUMBER NOT NULL,
+  CLASS_NAME VARCHAR2(4000),
+  CREATE_TIME NUMBER(10) NOT NULL,
+  DB_ID NUMBER,
+  FUNC_NAME VARCHAR2(128),
+  FUNC_TYPE NUMBER(10) NOT NULL,
+  OWNER_NAME VARCHAR2(128),
+  OWNER_TYPE VARCHAR2(10)
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+
+CREATE TABLE FUNC_RU (
+  FUNC_ID NUMBER NOT NULL,
+  RESOURCE_TYPE NUMBER(10) NOT NULL,
+  RESOURCE_URI VARCHAR2(4000),
+  INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+
+-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+
+-- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+------------------------------
+-- Transaction and lock tables
+-- These tables are not defined in the JDO mapping (package.jdo), so if you regenerate this file from the mapping you must manually add this section back.
+------------------------------
+CREATE TABLE TXNS (
+  TXN_ID NUMBER(19) PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED NUMBER(19) NOT NULL,
+  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
+  TC_DATABASE VARCHAR2(128) NOT NULL,
+  TC_TABLE VARCHAR2(128),
+  TC_PARTITION VARCHAR2(767) NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID NUMBER(19),
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+  HL_TXNID NUMBER(19),
+  HL_DB VARCHAR2(128) NOT NULL,
+  HL_TABLE VARCHAR2(128),
+  HL_PARTITION VARCHAR2(767),
+  HL_LOCK_STATE CHAR(1) NOT NULL,
+  HL_LOCK_TYPE CHAR(1) NOT NULL,
+  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  HL_ACQUIRED_AT NUMBER(19),
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+);
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
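+
+-- Illustrative only, not part of the upstream script: HL_TXNID_INDEX exists
+-- to make lookups of all locks held by one transaction cheap; the
+-- transaction id 42 below is hypothetical.
+SELECT HL_DB, HL_TABLE, HL_PARTITION, HL_LOCK_STATE, HL_LOCK_TYPE
+  FROM HIVE_LOCKS
+ WHERE HL_TXNID = 42;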
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
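+
+-- Sketch of the single-row counter pattern shared by NEXT_TXN_ID and
+-- NEXT_LOCK_ID (an assumption about client behavior, not part of the
+-- upstream script; left commented out because executing it would advance
+-- the counter):
+--   SELECT NL_NEXT FROM NEXT_LOCK_ID FOR UPDATE;
+--   UPDATE NEXT_LOCK_ID SET NL_NEXT = NL_NEXT + 1;
+--   COMMIT;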
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID NUMBER(19) PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START NUMBER(19),
+  CQ_RUN_AS varchar(128)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.14.0', 'Hive release version 0.14.0');
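+
+-- Illustrative only (an assumption, not part of the upstream script; left
+-- commented out so the INSERT above remains the last executed step):
+--   SELECT SCHEMA_VERSION FROM VERSION WHERE VER_ID = 1;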


[48/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-site.xml
new file mode 100755
index 0000000..cc9c27a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-site.xml
@@ -0,0 +1,527 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>timeline.metrics.service.operation.mode</name>
+    <value>embedded</value>
+    <display-name>Metrics Service operation mode</display-name>
+    <description>
+      Service Operation modes:
+      1) embedded: Metrics stored on local FS, HBase in Standalone mode
+      2) distributed: HBase daemons writing to HDFS
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.service.webapp.address</name>
+    <value>0.0.0.0:6188</value>
+    <description>
+      The address of the metrics service web application.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.service.rpc.address</name>
+    <value>0.0.0.0:60200</value>
+    <description>
+      The address of the metrics service rpc listeners.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.aggregator.checkpoint.dir</name>
+    <value>/var/lib/ambari-metrics-collector/checkpoint</value>
+    <display-name>Aggregator checkpoint directory</display-name>
+    <description>
+      Directory to store aggregator checkpoints. Change to a permanent
+      location so that checkpoints are not lost.
+    </description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.host.aggregator.minute.interval</name>
+    <value>300</value>
+    <display-name>Minute host aggregator interval</display-name>
+    <description>
+      Time in seconds to sleep for the minute resolution host based
+      aggregator. Default resolution is 5 minutes.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.host.aggregator.hourly.interval</name>
+    <value>3600</value>
+    <display-name>Hourly host aggregator interval</display-name>
+    <description>
+      Time in seconds to sleep for the hourly resolution host based
+      aggregator. Default resolution is 1 hour.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.daily.aggregator.minute.interval</name>
+    <value>86400</value>
+    <description>
+      Time in seconds to sleep for the day resolution host based
+      aggregator. Default resolution is 24 hours.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.hourly.interval</name>
+    <value>3600</value>
+    <display-name>Hourly cluster aggregator Interval</display-name>
+    <description>
+      Time in seconds to sleep for the hourly resolution cluster wide
+      aggregator. Default is 1 hour.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.daily.interval</name>
+    <value>86400</value>
+    <description>
+      Time in seconds to sleep for the day resolution cluster wide
+      aggregator. Default is 24 hours.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.minute.interval</name>
+    <value>300</value>
+    <display-name>Minute cluster aggregator interval</display-name>
+    <description>
+      Time in seconds to sleep for the minute resolution cluster wide
+      aggregator. Default resolution is 5 minutes.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.second.interval</name>
+    <value>120</value>
+    <display-name>Second cluster aggregator interval</display-name>
+    <description>
+      Time in seconds to sleep for the second resolution cluster wide
+      aggregator. Default resolution is 2 minutes.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier</name>
+    <value>1</value>
+    <description>
+      Multiplier value * interval = max allowed checkpoint lag. If the
+      aggregator's checkpoint is older than the max allowed lag, the
+      checkpoint is discarded by the aggregator.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier</name>
+    <value>2</value>
+    <display-name>Hourly host aggregator checkpoint cutOff multiplier</display-name>
+    <description>
+      Multiplier value * interval = max allowed checkpoint lag. If the
+      aggregator's checkpoint is older than the max allowed lag, the
+      checkpoint is discarded by the aggregator.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
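+  <!-- Worked example (illustrative, derived from the values above): with the
+       hourly interval of 3600 seconds and a cutOff multiplier of 2, a
+       checkpoint lagging more than 2 * 3600 = 7200 seconds behind is
+       considered too old and is discarded by the aggregator. -->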
+  <property>
+    <name>timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier</name>
+    <value>2</value>
+    <display-name>Minute host aggregator checkpoint cutOff multiplier</display-name>
+    <description>
+      Multiplier value * interval = max allowed checkpoint lag. If the
+      aggregator's checkpoint is older than the max allowed lag, the
+      checkpoint is discarded by the aggregator.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier</name>
+    <value>2</value>
+    <display-name>Hourly cluster aggregator checkpoint cutOff multiplier</display-name>
+    <description>
+      Multiplier value * interval = max allowed checkpoint lag. If the
+      aggregator's checkpoint is older than the max allowed lag, the
+      checkpoint is discarded by the aggregator.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier</name>
+    <value>2</value>
+    <display-name>Second cluster aggregator checkpoint cutOff multiplier</display-name>
+    <description>
+      Multiplier value * interval = max allowed checkpoint lag. If the
+      aggregator's checkpoint is older than the max allowed lag, the
+      checkpoint is discarded by the aggregator.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier</name>
+    <value>2</value>
+    <display-name>Minute cluster aggregator checkpoint cutOff multiplier</display-name>
+    <description>
+      Multiplier value * interval = max allowed checkpoint lag. If the
+      aggregator's checkpoint is older than the max allowed lag, the
+      checkpoint is discarded by the aggregator.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier</name>
+    <value>1</value>
+    <description>
+      Multiplier value * interval = max allowed checkpoint lag. If the
+      aggregator's checkpoint is older than the max allowed lag, the
+      checkpoint is discarded by the aggregator.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.host.aggregator.daily.disabled</name>
+    <value>false</value>
+    <description>
+      Disable host based daily aggregations.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.host.aggregator.hourly.disabled</name>
+    <value>false</value>
+    <display-name>Disable Hourly host aggregator</display-name>
+    <description>
+      Disable host based hourly aggregations.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.host.aggregator.minute.disabled</name>
+    <value>false</value>
+    <display-name>Disable Minute host aggregator</display-name>
+    <description>
+      Disable host based minute aggregations.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.daily.disabled</name>
+    <value>false</value>
+    <description>
+      Disable cluster based daily aggregations.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.hourly.disabled</name>
+    <display-name>Disable Hourly cluster aggregator</display-name>
+    <value>false</value>
+    <description>
+      Disable cluster based hourly aggregations.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.minute.disabled</name>
+    <value>false</value>
+    <display-name>Disable minute cluster aggregator</display-name>
+    <description>
+      Disable cluster based minute aggregations.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.second.disabled</name>
+    <value>false</value>
+    <display-name>Disable second cluster aggregator</display-name>
+    <description>
+      Disable cluster based second aggregations.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.second.timeslice.interval</name>
+    <value>30</value>
+    <display-name>Second cluster aggregator timeslice interval</display-name>
+    <description>
+      Lowest resolution of desired data for cluster level second aggregates.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.host.aggregator.daily.ttl</name>
+    <value>31536000</value>
+    <description>
+      Host based daily resolution data purge interval. Default is 1 year.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.host.aggregator.hourly.ttl</name>
+    <value>2592000</value>
+    <description>
+      Host based hourly resolution data purge interval. Default is 30 days.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.host.aggregator.minute.ttl</name>
+    <value>604800</value>
+    <description>
+      Host based minute resolution data purge interval. Default is 7 days.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.second.ttl</name>
+    <value>2592000</value>
+    <description>
+      Cluster wide second resolution data purge interval. Default is 30 days.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.minute.ttl</name>
+    <value>7776000</value>
+    <description>
+      Cluster wide minute resolution data purge interval. Default is 90 days.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.hourly.ttl</name>
+    <value>31536000</value>
+    <description>
+      Cluster wide hourly resolution data purge interval. Default is 1 year.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregator.daily.ttl</name>
+    <value>63072000</value>
+    <description>
+      Cluster wide daily resolution data purge interval. Default is 2 years.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.host.aggregator.ttl</name>
+    <value>86400</value>
+    <description>
+      1 minute resolution data purge interval. Default is 1 day.
+    </description>
+  </property>
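+  <!-- The TTL values above are in seconds (illustrative conversions):
+       86400 = 1 day, 604800 = 7 days, 2592000 = 30 days, 7776000 = 90 days,
+       31536000 = 1 year, 63072000 = 2 years. -->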
+  <property>
+    <name>timeline.metrics.hbase.data.block.encoding</name>
+    <value>FAST_DIFF</value>
+    <description>
+      Codecs are enabled on a table by setting the DATA_BLOCK_ENCODING property.
+      Default encoding is FAST_DIFF. This can be changed only before creating
+      tables.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.hbase.compression.scheme</name>
+    <value>SNAPPY</value>
+    <description>
+      Compression codes need to be installed and available before setting the
+      scheme. Default compression is SNAPPY. Disable by setting to None.
+      This can be changed only before creating tables.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.service.default.result.limit</name>
+    <value>15840</value>
+    <description>
+      Max result limit on the number of rows returned. Calculated as follows:
+      22 aggregate metrics/min * 2 * 60 * 6, i.e. 10-second data for 2 hours.
+    </description>
+    <display-name>Metrics service default result limit</display-name>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
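+  <!-- Worked arithmetic for the default above (illustrative): 22 aggregate
+       metrics per minute at 10-second resolution is 22 * 6 samples per
+       minute; over 2 hours (120 minutes) that is 22 * 6 * 120 = 15840 rows. -->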
+  <property>
+    <name>timeline.metrics.service.checkpointDelay</name>
+    <value>60</value>
+    <display-name>Metrics service checkpoint delay</display-name>
+    <description>
+      Time in seconds to sleep on the first run or when the checkpoint is
+      too old.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>timeline.metrics.service.resultset.fetchSize</name>
+    <display-name>Metrics service resultset fetchSize</display-name>
+    <value>2000</value>
+    <description>
+      JDBC result set fetch size for aggregator queries.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <!-- Phoenix properties that would manifest in the hbase-site.xml on the client side -->
+  <property>
+    <name>phoenix.query.maxGlobalMemoryPercentage</name>
+    <value>25</value>
+    <description>
+      Percentage of total heap memory (i.e. Runtime.getRuntime().maxMemory())
+      that all threads may use.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.spool.directory</name>
+    <value>/tmp</value>
+    <description>
+      Set directory for Phoenix spill files. If possible set this to a
+      different mount point from the one for hbase.rootdir in embedded mode.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.service.cluster.aggregator.appIds</name>
+    <value>datanode,nodemanager,hbase</value>
+    <description>
+      List of application ids used to aggregate host level metrics into
+      application level metrics, for example bytes_read across all YARN NodeManagers.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.service.use.groupBy.aggregators</name>
+    <value>true</value>
+    <description>
+      Use a GROUP BY aggregate query to perform host level aggregations
+      instead of in-memory aggregation.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.host.aggregate.splitpoints</name>
+    <value> </value>
+    <description>
+      Pre-split regions using the split points corresponding to this property
+      for the precision table that stores seconds aggregate data.
+    </description>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.regionserver.global.memstore.upperLimit</name>
+      </property>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.hregion.memstore.flush.size</name>
+      </property>
+      <property>
+        <type>ams-hbase-env</type>
+        <name>hbase_master_heapsize</name>
+      </property>
+      <property>
+        <type>ams-hbase-env</type>
+        <name>hbase_regionserver_heapsize</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>timeline.metrics.cluster.aggregate.splitpoints</name>
+    <value> </value>
+    <description>
+      Pre-split regions using the split points corresponding to this property
+      for the aggregate table that stores seconds aggregate data across hosts.
+    </description>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.regionserver.global.memstore.upperLimit</name>
+      </property>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.hregion.memstore.flush.size</name>
+      </property>
+      <property>
+        <type>ams-hbase-env</type>
+        <name>hbase_master_heapsize</name>
+      </property>
+      <property>
+        <type>ams-hbase-env</type>
+        <name>hbase_regionserver_heapsize</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>timeline.metrics.sink.report.interval</name>
+    <value>60</value>
+    <description>
+      Time in seconds to sleep before reporting metrics to the collector.
+      Default is 1 minute.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.sink.collection.period</name>
+    <value>60</value>
+    <description>
+      The interval between two service metrics data exports.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.service.watcher.initial.delay</name>
+    <value>600</value>
+    <description>
+      The time to delay the first watcher check execution.
+      Default is 10 minutes.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.service.watcher.delay</name>
+    <value>30</value>
+    <description>
+      The delay between the termination of one watcher check
+      execution and the commencement of the next. Default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.service.watcher.timeout</name>
+    <value>30</value>
+    <description>
+      The maximum time to wait for a single watcher check execution.
+      Default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>timeline.metrics.hbase.fifo.compaction.enabled</name>
+    <value>true</value>
+    <description>
+      Enable the FIFO compaction policy for the lower-precision and minute aggregate tables.
+    </description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/kerberos.json
new file mode 100755
index 0000000..03c3f93
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/kerberos.json
@@ -0,0 +1,122 @@
+{
+  "services": [
+    {
+      "name": "AMBARI_METRICS",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/hdfs"
+        }
+      ],
+      "components": [
+        {
+          "name": "METRICS_COLLECTOR",
+          "identities": [
+            {
+              "name": "ams_hbase_master_hbase",
+              "principal": {
+                "value": "amshbase/_HOST@${realm}",
+                "type": "service",
+                "configuration": "ams-hbase-security-site/hbase.master.kerberos.principal",
+                "local_username": "${ams-env/ambari_metrics_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/ams-hbase.master.keytab",
+                "owner": {
+                  "name": "${ams-env/ambari_metrics_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "ams-hbase-security-site/hbase.master.keytab.file"
+              }
+            },
+            {
+              "name": "ams_hbase_regionserver_hbase",
+              "principal": {
+                "value": "amshbase/_HOST@${realm}",
+                "type": "service",
+                "configuration": "ams-hbase-security-site/hbase.regionserver.kerberos.principal",
+                "local_username": "${ams-env/ambari_metrics_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/ams-hbase.regionserver.keytab",
+                "owner": {
+                  "name": "${ams-env/ambari_metrics_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "ams-hbase-security-site/hbase.regionserver.keytab.file"
+              }
+            },
+            {
+              "name": "ams_collector",
+              "principal": {
+                "value": "amshbase/_HOST@${realm}",
+                "type": "service",
+                "configuration": "ams-hbase-security-site/hbase.myclient.principal",
+                "local_username": "${ams-env/ambari_metrics_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/ams.collector.keytab",
+                "owner": {
+                  "name": "${ams-env/ambari_metrics_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "ams-hbase-security-site/hbase.myclient.keytab"
+              }
+            },
+            {
+              "name": "ams_zookeeper",
+              "principal": {
+                "value": "zookeeper/_HOST@${realm}",
+                "type": "service",
+                "configuration": "ams-hbase-security-site/ams.zookeeper.principal",
+                "local_username": "${ams-env/ambari_metrics_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/zk.service.ams.keytab",
+                "owner": {
+                  "name": "${ams-env/ambari_metrics_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "ams-hbase-security-site/ams.zookeeper.keytab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "ams-hbase-security-site": {
+                "hbase.security.authentication": "kerberos",
+                "hbase.security.authorization": "true",
+                "hadoop.security.authentication": "kerberos",
+                "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController",
+                "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.AccessController",
+                "zookeeper.znode.parent": "/ams-hbase-secure",
+                "hbase.zookeeper.property.kerberos.removeHostFromPrincipal": "true",
+                "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal": "true",
+                "hbase.zookeeper.property.authProvider.1": "org.apache.zookeeper.server.auth.SASLAuthenticationProvider",
+                "hbase.zookeeper.property.jaasLoginRenew": "3600000"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/metainfo.xml
new file mode 100755
index 0000000..5002459
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/metainfo.xml
@@ -0,0 +1,147 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>AMBARI_METRICS</name>
+      <displayName>Ambari Metrics</displayName>
+      <version>0.1.0</version>
+      <comment>A system for metrics collection that provides storage and retrieval capability for metrics collected from the cluster
+      </comment>
+      <components>
+        <component>
+          <name>METRICS_COLLECTOR</name>
+          <displayName>Metrics Collector</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <timelineAppid>AMS-HBASE</timelineAppid>
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/metrics_collector.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+        </component>
+        <component>
+          <name>METRICS_MONITOR</name>
+          <displayName>Metrics Monitor</displayName>
+          <category>SLAVE</category>
+          <cardinality>ALL</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <auto-deploy>
+            <enabled>true</enabled>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/metrics_monitor.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>ambari-metrics-collector</name>
+              <skipUpgrade>true</skipUpgrade>
+              <condition>should_install_ams_collector</condition>
+            </package>
+            <package>
+              <name>ambari-metrics-monitor</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+            <package>
+              <name>ambari-metrics-hadoop-sink</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+            <package>
+              <name>gcc</name>
+            </package>
+            <package>
+              <name>snappy</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14</osFamily>
+          <packages>
+            <package>
+              <name>ambari-metrics-assembly</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+            <package>
+              <name>gcc</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>winsrv6</osFamily>
+          <packages>
+            <package>
+              <name>ambari-metrics-collector.msi</name>
+            </package>
+            <package>
+              <name>ambari-metrics-monitor.msi</name>
+            </package>
+            <package>
+              <name>ambari-metrics-hadoop-sink.msi</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>600</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>ams-site</config-type>
+        <config-type>ams-log4j</config-type>
+        <config-type>ams-env</config-type>
+        <config-type>ams-hbase-policy</config-type>
+        <config-type>ams-hbase-site</config-type>
+        <config-type>ams-hbase-security-site</config-type>
+        <config-type>ams-hbase-env</config-type>
+        <config-type>ams-hbase-log4j</config-type>
+      </configuration-dependencies>
+
+      <excluded-config-types>
+        <config-type>storm-site</config-type>
+      </excluded-config-types>
+
+    </service>
+  </services>
+</metainfo>


[04/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/metainfo.xml
new file mode 100755
index 0000000..ea4a822
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/metainfo.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <version>2.7.1</version>
+      
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_4_1_*-yarn</name>
+            </package>
+            <package>
+              <name>hadoop_4_1_*-mapreduce</name>
+            </package>
+            <package>
+              <name>hadoop_4_1_*-hdfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+      <themes>
+        <theme>
+           <fileName>theme.json</fileName>
+           <default>true</default>
+        </theme>
+      </themes>
+      
+    </service>
+    
+    <service>
+      <name>MAPREDUCE2</name>
+      <displayName>MapReduce2</displayName>
+      <version>2.7.1</version>
+      <configuration-dir>configuration-mapred</configuration-dir>
+      
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_4_1_*-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+      <themes-dir>themes-mapred</themes-dir>
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+      
+    </service>
+    
+  </services>
+</metainfo>
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/themes-mapred/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/themes-mapred/theme.json b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/themes-mapred/theme.json
new file mode 100755
index 0000000..5019447
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/themes-mapred/theme.json
@@ -0,0 +1,132 @@
+{
+  "name": "default",
+  "description": "Default theme for MAPREDUCE service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-mr-scheduler",
+                  "display-name": "MapReduce",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-mr-scheduler-row1-col1",
+                      "display-name": "MapReduce Framework",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row1-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row1-col3",
+                      "row-index": "0",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-mr-scheduler-row2-col1",
+                      "display-name": "MapReduce AppMaster",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "3"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "mapred-site/mapreduce.map.memory.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col1"
+        },
+        {
+          "config": "mapred-site/mapreduce.reduce.memory.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col2"
+        },
+        {
+          "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
+          "subsection-name": "subsection-mr-scheduler-row2-col1"
+        },
+        {
+          "config": "mapred-site/mapreduce.task.io.sort.mb",
+          "subsection-name": "subsection-mr-scheduler-row1-col3"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "mapred-site/mapreduce.map.memory.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/mapreduce.reduce.memory.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/yarn.app.mapreduce.am.resource.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "mapred-site/mapreduce.task.io.sort.mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/themes/theme.json b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/themes/theme.json
new file mode 100755
index 0000000..758cf0c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/themes/theme.json
@@ -0,0 +1,250 @@
+{
+  "name": "default",
+  "description": "Default theme for YARN service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "3",
+              "tab-rows": "2",
+              "sections": [
+                {
+                  "name": "section-nm-sizing",
+                  "display-name": "Memory",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-nm-sizing-col1",
+                      "display-name": "Node",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-nm-sizing-col2",
+                      "display-name": "Container",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-yarn-platform-features",
+                  "display-name": "YARN Features",
+                  "row-index": "0",
+                  "column-index": "2",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-yarn-platform-features-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-container-sizing",
+                  "display-name": "CPU",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-container-sizing-col1",
+                      "display-name": "Node",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-container-sizing-col2",
+                      "display-name": "Container",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
+          "subsection-name": "subsection-nm-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
+          "subsection-name": "subsection-nm-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
+          "subsection-name": "subsection-nm-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.node-labels.enabled",
+          "subsection-name": "subsection-yarn-platform-features-col1"
+        },
+        {
+          "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
+          "subsection-name": "subsection-yarn-platform-features-col1"
+        },
+        {
+          "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-env/yarn_cgroups_enabled",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
+          "subsection-name": "subsection-container-sizing-col1"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
+          "subsection-name": "subsection-container-sizing-col2"
+        },
+        {
+          "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
+          "subsection-name": "subsection-container-sizing-col2"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.memory-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.percentage-physical-cpu-limit",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.nodemanager.resource.cpu-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.minimum-allocation-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.maximum-allocation-mb",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.minimum-allocation-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.scheduler.maximum-allocation-vcores",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "yarn-site/yarn.node-labels.enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "yarn-env/yarn_cgroups_enabled",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "yarn-site/yarn.resourcemanager.scheduler.monitor.enable",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "capacity-scheduler/yarn.scheduler.capacity.resource-calculator",
+        "widget": {
+          "type": "toggle"
+        }
+      }
+    ]
+  }
+}
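
The theme above pairs each YARN/capacity-scheduler property with a layout subsection ("placement") and a UI control ("widgets"): memory and vcore allocations get sliders with MB/int/percent units, while booleans get toggles. For illustration only, a small Python sketch (not part of this patch) that sanity-checks such a theme file; the theme.json path and the key names it scans for are assumptions taken from the fragment above.

    import json

    def collect_configs(node, companion_key, out):
        # Recursively gather "config" values from dicts that also carry companion_key.
        if isinstance(node, dict):
            if "config" in node and companion_key in node:
                out.add(node["config"])
            for value in node.values():
                collect_configs(value, companion_key, out)
        elif isinstance(node, list):
            for item in node:
                collect_configs(item, companion_key, out)

    with open("theme.json") as fp:  # hypothetical path to a theme like the one above
        theme = json.load(fp)

    placed, widgeted = set(), set()
    collect_configs(theme, "subsection-name", placed)   # placement entries
    collect_configs(theme, "widget", widgeted)          # widget entries
    print("widgets with no placement:", sorted(widgeted - placed))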

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/ZOOKEEPER/metainfo.xml
new file mode 100755
index 0000000..038bdaf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <version>3.4.6</version>
+      
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/stack_advisor.py
new file mode 100755
index 0000000..a64d4e9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/stack_advisor.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from math import ceil, sqrt
+
+
+# BigInsights40StackAdvisor is not imported here: Ambari's stack-advisor loader
+# evaluates parent stack advisors into this module's namespace before this file runs.
+class BigInsights41StackAdvisor(BigInsights40StackAdvisor):
+
+  def getServiceConfigurationRecommenderDict(self):
+    parentRecommendConfDict = super(BigInsights41StackAdvisor, self).getServiceConfigurationRecommenderDict()
+    childRecommendConfDict = {
+      "MAPREDUCE2": self.recommendMapReduce2Configurations
+    }
+    parentRecommendConfDict.update(childRecommendConfDict)
+    return parentRecommendConfDict
+
+  def recommendMapReduce2Configurations(self, configurations, clusterData, services, hosts):
+    super(BigInsights41StackAdvisor, self).recommendMapReduce2Configurations(configurations, clusterData, services, hosts)
+
+    putMapredProperty = self.putProperty(configurations, "mapred-site", services)
+    putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(0.8 * int(configurations["mapred-site"]["properties"]["yarn.app.mapreduce.am.resource.mb"]))) + "m" + " -Diop.version=${iop.version}")
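
The override above sets the MapReduce ApplicationMaster JVM heap to 80% of the AM container size and pins -Diop.version. The same arithmetic in isolation (the container size is an example value, not from the patch):

    # Example AM container size from mapred-site (yarn.app.mapreduce.am.resource.mb), in MB.
    am_resource_mb = 2048

    # 80% of the container becomes JVM heap; the rest is native/JVM-overhead headroom.
    am_heap_mb = int(0.8 * am_resource_mb)

    am_command_opts = "-Xmx%dm -Diop.version=${iop.version}" % am_heap_mb
    print(am_command_opts)  # -Xmx1638m -Diop.version=${iop.version}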

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/hook.py
new file mode 100755
index 0000000..8a583b3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/hook.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.hook import Hook
+from shared_initialization import link_configs
+from shared_initialization import setup_config
+from shared_initialization import setup_stack_symlinks
+
+class AfterInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_stack_symlinks()
+    setup_config()
+
+    link_configs(self.stroutfile)
+
+if __name__ == "__main__":
+  AfterInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/params.py
new file mode 100755
index 0000000..4f589f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/params.py
@@ -0,0 +1,101 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.version import format_stack_version
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
+
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+# current host stack version
+current_version = default("/hostLevelParams/current_version", None)
+
+# default hadoop params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+
+# IOP 4.0+ params
+if Script.is_stack_greater_or_equal("4.0"):
+  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
+
+  # not supported in IOP 4.0+
+  hadoop_conf_empty_dir = None
+
+versioned_stack_root = '/usr/iop/current'
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#users and groups
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['cluster-env']['user_group']
+
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+has_namenode = not len(namenode_host) == 0
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+
+link_configs_lock_file = os.path.join(tmp_dir, "link_configs_lock_file")
+stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
+
+upgrade_suspended = default("/roleParams/upgrade_suspended", False)
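
Most assignments above read optional command JSON through default("/path", fallback). A simplified stand-in for that lookup -- the real resource_management helper does more, so this is only a sketch of the path semantics:

    def default_lookup(config, path, default_value):
        # Walk a '/'-separated path through nested dicts,
        # returning default_value on any missing key.
        node = config
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return default_value
            node = node[key]
        return node

    cfg = {"commandParams": {"dfs_type": "HDFS"}}
    print(default_lookup(cfg, "/commandParams/dfs_type", ""))           # HDFS
    print(default_lookup(cfg, "/roleParams/upgrade_suspended", False))  # False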

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/shared_initialization.py
new file mode 100755
index 0000000..d306066
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,108 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+import ambari_simplejson as json
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.script import Script
+
+
+def setup_stack_symlinks():
+  """
+  Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
+  stack version, such as "4.1". This should always be called after a component has been
+  installed to ensure that all IOP pointers are correct. The stack upgrade logic does not
+  interact with this since it's done via a custom command and will not trigger this hook.
+  :return:
+  """
+  import params
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '4.0') >= 0:
+    # try using the exact version first, falling back to just the stack version if it's
+    # not defined, which would only happen during an initial cluster installation
+    version = params.current_version if params.current_version is not None else params.stack_version_unformatted
+
+    if not params.upgrade_suspended:
+      # On parallel command execution this should be executed by a single process at a time.
+      with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+        stack_select.select_all(version)
+
+def setup_config():
+  import params
+  stackversion = params.stack_version_unformatted
+  Logger.info("FS Type: {0}".format(params.dfs_type))
+
+  is_hadoop_conf_dir_present = False
+  if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
+    is_hadoop_conf_dir_present = True
+  else:
+    Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
+
+  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
+    # create core-site only if the hadoop config directory exists
+    XmlConfig("core-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['core-site'],
+              configuration_attributes=params.config['configuration_attributes']['core-site'],
+              owner=params.hdfs_user,
+              group=params.user_group,
+              only_if=format("ls {hadoop_conf_dir}"))
+
+
+def load_version(struct_out_file):
+  """
+  Load the 'version' value from the structured output file. Kept as a separate method for testability.
+  """
+  json_version = None
+  try:
+    if os.path.exists(struct_out_file):
+      with open(struct_out_file, 'r') as fp:
+        json_info = json.load(fp)
+        json_version = json_info['version']
+  except:
+    # the struct-out file may be missing, unreadable, or lack a 'version' key
+    pass
+
+  return json_version
+  
+
+def link_configs(struct_out_file):
+  """
+  Links configs, only on a fresh install of IOP-4.1 and higher
+  """
+  import params
+
+  if not Script.is_stack_greater_or_equal("4.1"):
+    Logger.info("Can only link configs for IOP-4.1 and higher.")
+    return
+
+  json_version = load_version(struct_out_file)
+
+  if not json_version:
+    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
+    return
+
+  # On parallel command execution this should be executed by a single process at a time.
+  with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+    for k, v in conf_select.get_package_dirs().iteritems():
+      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file
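
Both hooks above guard their critical sections with FcntlBasedProcessLock so that, under parallel command execution, only one agent process links configs or runs the stack selector at a time. A minimal illustrative equivalent (not the resource_management implementation) built on an advisory fcntl lock:

    import fcntl
    import os
    from contextlib import contextmanager

    @contextmanager
    def process_lock(lock_file):
        # Advisory, inter-process exclusive lock; blocks until the holder releases it.
        fd = os.open(lock_file, os.O_CREAT | os.O_RDWR)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX)
            yield
        finally:
            fcntl.flock(fd, fcntl.LOCK_UN)
            os.close(fd)

    with process_lock("/tmp/link_configs_lock_file"):
        pass  # e.g. convert each conf directory to a symlink exactly once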

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/files/changeToSecureUid.sh
new file mode 100755
index 0000000..1e1d655
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/files/changeToSecureUid.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+username=$1
+directories=$2
+newUid=$3
+
+function find_available_uid() {
+ for ((i=1001; i<=2000; i++))
+ do
+   cut -d: -f3 /etc/passwd | grep -qx $i
+   if [ "$?" -ne 0 ]
+   then
+    newUid=$i
+    break
+   fi
+ done
+}
+
+if [ -z "$2" ]; then
+  if id -u ${username} >/dev/null 2>&1; then
+   newUid=$(id -u ${username})
+  else
+   find_available_uid
+  fi
+  echo $newUid
+  exit 0
+fi
+
+if [ "${newUid:-0}" -eq 0 ]
+then
+  echo "Failed to find uid between 1001 and 2000"
+  exit 1
+fi
+
+
+set -e
+dir_array=($(echo $directories | sed 's/,/\n/g'))
+old_uid=$(id -u $username)
+sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
+echo "Changing uid of $username from $old_uid to $newUid"
+echo "Changing directory permisions for ${dir_array[@]}"
+$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
+exit 0
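
find_available_uid above scans for the first unused UID in the 1001-2000 range. The same scan in Python, field-aware against the standard colon-separated /etc/passwd layout, as a cross-check of the shell logic:

    def find_available_uid(passwd_path="/etc/passwd"):
        # Collect UIDs already assigned (third colon-separated field), then return
        # the first free UID in the 1001-2000 range the shell script probes.
        used = set()
        with open(passwd_path) as fp:
            for line in fp:
                fields = line.split(":")
                if len(fields) > 2 and fields[2].isdigit():
                    used.add(int(fields[2]))
        for uid in range(1001, 2001):
            if uid not in used:
                return uid
        return None  # mirrors the script's "failed to find uid" branch

    print(find_available_uid())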

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/hook.py
new file mode 100755
index 0000000..c34be0b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/hook.py
@@ -0,0 +1,36 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from shared_initialization import *
+
+class BeforeAnyHook(Hook):
+
+  def hook(self, env):
+    import params
+    env.set_params(params)
+
+    setup_users()
+    if params.has_namenode or params.dfs_type == 'HCFS':
+      setup_hadoop_env()
+    setup_java()
+
+if __name__ == "__main__":
+  BeforeAnyHook().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/params.py
new file mode 100755
index 0000000..b0467a9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/params.py
@@ -0,0 +1,241 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import collections
+import re
+import os
+
+import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.expect import expect
+from ambari_commons.os_check import OSCheck
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+jdk_location = config['hostLevelParams']['jdk_location']
+
+sudo = AMBARI_SUDO_BINARY
+
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+restart_type = default("/commandParams/restart_type", "")
+version = default("/commandParams/version", None)
+# Handle upgrade and downgrade
+if (restart_type.lower() == "rolling_upgrade" or restart_type.lower() == "nonrolling_upgrade") and version:
+  stack_version_formatted = format_stack_version(version)
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+
+# Some datanode settings
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+secure_dn_ports_are_in_use = False
+
+def get_port(address):
+  """
+  Extracts port from the address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None:
+    return int(m.group(2))
+  else:
+    return None
+
+def is_secure_port(port):
+  """
+  Returns True if port is root-owned at *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False
+
+# hadoop default params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+
+# upgrades would cause these directories to have a version instead of "current"
+# which would cause a lot of problems when writing out hadoop-env.sh; instead
+# force the use of "current" in the hook
+hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
+
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+hadoop_secure_dn_user = hdfs_user
+hadoop_dir = "/etc/hadoop"
+versioned_stack_root = '/usr/iop/current'
+hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
+datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
+is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
+
+# IOP 4.0+ params
+if Script.is_stack_greater_or_equal("4.0"):
+  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
+
+  # not supported in IOP 4.0+
+  hadoop_conf_empty_dir = None
+
+  if not security_enabled:
+    hadoop_secure_dn_user = '""'
+  else:
+    dfs_dn_port = get_port(dfs_dn_addr)
+    dfs_dn_http_port = get_port(dfs_dn_http_addr)
+    dfs_dn_https_port = get_port(dfs_dn_https_addr)
+    # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
+    if dfs_http_policy == "HTTPS_ONLY":
+      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
+    elif dfs_http_policy == "HTTP_AND_HTTPS":
+      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
+    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
+    if secure_dn_ports_are_in_use:
+      hadoop_secure_dn_user = hdfs_user
+    else:
+      hadoop_secure_dn_user = '""'
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = config['configurations']['tez-env']["tez_user"]
+oozie_user = config['configurations']['oozie-env']["oozie_user"]
+falcon_user = config['configurations']['falcon-env']["falcon_user"]
+ranger_user = config['configurations']['ranger-env']["ranger_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+
+has_namenode = not len(namenode_host) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_tez = 'tez-site' in config['configurations']
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+has_falcon_server_hosts = not len(falcon_server_hosts) == 0
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+ranger_group = config['configurations']['ranger-env']['ranger_group']
+dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
+
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)
+
+user_to_groups_dict = collections.defaultdict(lambda:[user_group])
+user_to_groups_dict[smoke_user] = [proxyuser_group]
+if has_ganglia_server:
+  user_to_groups_dict[gmond_user] = [gmond_user]
+  user_to_groups_dict[gmetad_user] = [gmetad_user]
+if has_tez:
+  user_to_groups_dict[tez_user] = [proxyuser_group]
+if has_oozie_server:
+  user_to_groups_dict[oozie_user] = [proxyuser_group]
+if has_falcon_server_hosts:
+  user_to_groups_dict[falcon_user] = [proxyuser_group]
+if has_ranger_admin:
+  user_to_groups_dict[ranger_user] = [ranger_group]
+
+user_to_gid_dict = collections.defaultdict(lambda:user_group)
+
+user_list = json.loads(config['hostLevelParams']['user_list'])
+group_list = json.loads(config['hostLevelParams']['group_list'])
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+if has_tez:
+  tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
+override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
+
+##### Custom extensions ######
+
+#Hadoop custom extensions
+hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
+hadoop_custom_extensions_services = default("/configurations/core-site/hadoop.custom-extensions.services", "")
+hadoop_custom_extensions_owner = default("/configurations/core-site/hadoop.custom-extensions.owner", hdfs_user)
+hadoop_custom_extensions_services = [ service.strip().upper() for service in hadoop_custom_extensions_services.split(",") ]
+hadoop_custom_extensions_services.append("YARN")
+hadoop_custom_extensions_hdfs_dir = "/iop/ext/{0}/hadoop".format(stack_version_formatted)
+hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(Script.get_stack_root())
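
get_port/is_secure_port above decide whether the datanode would bind root-owned (<1024) ports, which in turn decides whether hadoop_secure_dn_user is kept or blanked. A quick standalone check of that parsing; the function body repeats the regex from the file, and the addresses are made-up examples:

    import re

    def get_port(address):
        # Optional http(s) scheme, a host, then a 1-5 digit port, as in params.py above.
        if address is None:
            return None
        m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
        return int(m.group(2)) if m else None

    print(get_port("0.0.0.0:1019"))               # 1019  -> secure (root-owned) port
    print(get_port("https://dn1.example:50475"))  # 50475 -> non-secure port
    print(get_port("not-an-address"))             # None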

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/shared_initialization.py
new file mode 100755
index 0000000..930ed1f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-ANY/scripts/shared_initialization.py
@@ -0,0 +1,253 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import getpass
+import tempfile
+from copy import copy
+from resource_management.libraries.functions.version import compare_versions
+from resource_management import *
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+  should_create_users_and_groups = not params.host_sys_prepped and not params.ignore_groupsusers_create
+
+  if should_create_users_and_groups:
+    for group in params.group_list:
+      Group(group,
+      )
+
+    for user in params.user_list:
+      if params.override_uid == "true":
+        User(user,
+            uid = get_uid(user),
+            gid = params.user_to_gid_dict[user],
+            groups = params.user_to_groups_dict[user],
+        )
+      else:
+        User(user,
+            gid = params.user_to_gid_dict[user],
+            groups = params.user_to_groups_dict[user],
+        )
+
+    if params.override_uid == "true":
+      set_uid(params.smoke_user, params.smoke_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for smoke user as override_uid is not enabled')
+  else:
+    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
+    pass
+
+
+  if params.has_hbase_masters:
+    Directory (params.hbase_tmp_dir,
+               owner = params.hbase_user,
+               mode=0775,
+               create_parents = True,
+               cd_access="a",
+    )
+    if not params.host_sys_prepped and params.override_uid == "true":
+      set_uid(params.hbase_user, params.hbase_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for hbase user as host is sys prepped or override_uid is not enabled')
+      pass
+
+  if not params.host_sys_prepped:
+    if params.has_namenode:
+      if should_create_users_and_groups:
+        create_dfs_cluster_admins()
+    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '4.1') >= 0:
+      if should_create_users_and_groups:
+        create_tez_am_view_acls()
+  else:
+    Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
+
+def create_dfs_cluster_admins():
+  """
+  dfs.cluster.administrators supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
+  """
+  import params
+
+  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
+
+  User(params.hdfs_user,
+    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
+          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+  )
+
+def create_tez_am_view_acls():
+
+  """
+  tez.am.view-acls supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
+  """
+  import params
+
+  if not params.tez_am_view_acls.startswith("*"):
+    create_users_and_groups(params.tez_am_view_acls)
+
+def create_users_and_groups(user_and_groups):
+
+  import params
+
+  parts = re.split(r'\s', user_and_groups)
+  if len(parts) == 1:
+    parts.append("")
+
+  users_list = parts[0].strip(',').split(',') if parts[0] else []
+  groups_list = parts[1].strip(',').split(',') if parts[1] else []
+
+  if users_list:
+    User(users_list,
+          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+    )
+
+  if groups_list:
+    Group(copy(groups_list),
+    )
+  return groups_list
+    
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma separated directories
+  """
+  import params
+
+  File(format("{tmp_dir}/changeUid.sh"),
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
+  uid = get_uid(user)
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {uid}"),
+          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
+
+def get_uid(user):
+  import params
+  import commands
+  user_str = str(user) + "_uid"
+  service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
+
+  if service_env and params.config['configurations'][service_env[0]][user_str]:
+    service_env_str = str(service_env[0])
+    uid = params.config['configurations'][service_env_str][user_str]
+    if len(service_env) > 1:
+      Logger.warning("Multiple values found for %s, using %s" % (user_str, uid))
+    return uid
+  else:
+    if user == params.smoke_user:
+      return 0
+    File(format("{tmp_dir}/changeUid.sh"),
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+    ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
+    newUid=commands.getoutput(format("{tmp_dir}/changeUid.sh {user}"))
+    return newUid
+
+def setup_hadoop_env():
+  import params
+  stackversion = params.stack_version_unformatted
+  Logger.info("FS Type: {0}".format(params.dfs_type))
+  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+
+    # create /etc/hadoop
+    Directory(params.hadoop_dir, mode=0755)
+
+    # IOP < 4.0 used a conf -> conf.empty symlink for /etc/hadoop/
+    if Script.is_stack_less_than("4.0"):
+      Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
+        group=params.user_group )
+
+      Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
+         not_if=format("ls {hadoop_conf_dir}"))
+
+    # write out hadoop-env.sh, but only if the directory exists
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
+        group=params.user_group,
+        content=InlineTemplate(params.hadoop_env_sh_template))
+
+    # Create tmp dir for java.io.tmpdir
+    # Handle a situation when /tmp is set to noexec
+    Directory(params.hadoop_java_io_tmpdir,
+              owner=params.hdfs_user,
+              group=params.user_group,
+              mode=01777
+    )
+
+def setup_java():
+  """
+  Installs jdk using specific params, that comes from ambari-server
+  """
+  import params
+
+  java_exec = format("{java_home}/bin/java")
+
+  if not os.path.isfile(java_exec):
+    if not params.jdk_name: # if custom jdk is used.
+      raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
+
+    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    java_dir = os.path.dirname(params.java_home)
+
+    Directory(params.artifact_dir,
+              create_parents = True,
+              )
+
+    File(jdk_curl_target,
+         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         not_if = format("test -f {jdk_curl_target}")
+    )
+
+    tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
+
+    try:
+      if params.jdk_name.endswith(".bin"):
+        chmod_cmd = ("chmod", "+x", jdk_curl_target)
+        install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+      elif params.jdk_name.endswith(".gz"):
+        chmod_cmd = ("chmod","a+x", java_dir)
+        install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+
+      Directory(java_dir
+      )
+
+      Execute(chmod_cmd,
+              sudo = True,
+              )
+
+      Execute(install_cmd,
+              )
+
+    finally:
+      Directory(tmp_java_dir, action="delete")
+
+    File(format("{java_home}/bin/java"),
+         mode=0755,
+         cd_access="a",
+         )
+    Execute(('chmod', '-R', '755', params.java_home),
+      sudo = True,
+    )
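
The create_users_and_groups helper above parses the "<comma-delimited users><space><comma-delimited groups>" convention shared by dfs.cluster.administrators and tez.am.view-acls. Its splitting logic in isolation, with example inputs:

    import re

    def split_users_and_groups(value):
        # "<comma-delimited users><space><comma-delimited groups>"; either half may be empty.
        parts = re.split(r'\s', value)
        if len(parts) == 1:
            parts.append("")
        users = parts[0].strip(',').split(',') if parts[0] else []
        groups = parts[1].strip(',').split(',') if parts[1] else []
        return users, groups

    print(split_users_and_groups("hdfs,ops admins"))  # (['hdfs', 'ops'], ['admins'])
    print(split_users_and_groups(" hadoop"))          # ([], ['hadoop'])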

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/hook.py
new file mode 100755
index 0000000..ce17776
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/hook.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+from repo_initialization import *
+
+class BeforeInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+    
+    install_repos()
+    install_packages()
+
+if __name__ == "__main__":
+  BeforeInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/params.py
new file mode 100755
index 0000000..6193c11
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/params.py
@@ -0,0 +1,113 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.core.system import System
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import default, format
+from resource_management.libraries.functions.expect import expect
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = config['configurations']['tez-env']["tez_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+
+# repo templates
+repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
+repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
+
+has_sqoop_client = 'sqoop-env' in config['configurations']
+has_namenode = not len(namenode_host) == 0
+has_hs = not len(hs_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_storm_server = not len(storm_server_hosts) == 0
+has_falcon_server = not len(falcon_host) == 0
+has_tez = 'tez-site' in config['configurations']
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/repo_initialization.py
new file mode 100755
index 0000000..98fa131
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/repo_initialization.py
@@ -0,0 +1,97 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.resources.repository import Repository
+from resource_management.core.logger import Logger
+import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+
+# components_list = repoName + postfix
+_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
+REPO_FILE_NAME_PREFIX = 'IOP-'
+STACK_TO_ROOT_FOLDER = {"IOP": "/usr/iop", "BIGINSIGHTS":"/usr/iop"}
+
+def _alter_repo(action, repo_string, repo_template):
+  """
+  @param action: "delete" or "create"
+  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
+  """
+  repo_dicts = json.loads(repo_string)
+
+  if not isinstance(repo_dicts, list):
+    repo_dicts = [repo_dicts]
+
+  if 0 == len(repo_dicts):
+    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
+  else:
+    Logger.info("Initializing {0} repositories".format(str(len(repo_dicts))))
+
+  for repo in repo_dicts:
+    if 'baseUrl' not in repo:
+      repo['baseUrl'] = None
+    if 'mirrorsList' not in repo:
+      repo['mirrorsList'] = None
+    
+    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
+    
+    Repository(repo['repoId'],
+               action = action,
+               base_url = repo['baseUrl'],
+               mirror_list = repo['mirrorsList'],
+               repo_file_name = repo['repoName'],
+               repo_template = repo_template,
+               components = ubuntu_components, # ubuntu specific
+    )
+
+    if action == "create":
+      #Attempt to clean cache against the given repo
+      repo_id=repo['repoId']
+      print "Clean cache against " + repo_id + "; file:" + repo['repoName']
+      current_repo_ids = []
+      current_repo_files = set() 
+      
+      if OSCheck.is_ubuntu_family():
+        current_repo_files.add("base")
+        current_repo_files.add(repo['repoName'])
+      elif OSCheck.is_suse_family():
+        current_repo_ids.append("base")
+        current_repo_ids.append(repo_id)
+      else:  
+        current_repo_ids.append(repo_id)
+      
+      Repository(repo_id,
+                 action = "clearcache",
+                 base_url = repo['baseUrl'],
+                 mirror_list = repo['mirrorsList'],
+                 repo_file_name = repo['repoName'],
+                 repo_template = repo_template,
+                 components = ubuntu_components,  # ubuntu specific
+                 use_repos=list(current_repo_files) if OSCheck.is_ubuntu_family() else current_repo_ids,
+                 skip_repos=["*"] if OSCheck.is_redhat_family() else []
+      )      
+
+def install_repos():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
+  _alter_repo("create", params.repo_info, template)
+  if params.service_repo_info:
+    _alter_repo("create", params.service_repo_info, template)
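
_alter_repo above consumes repo_info as a JSON string. The shape it expects (repoId/repoName/baseUrl, with mirrorsList optional) is easiest to see with a made-up repository; the URL and ids below are placeholders, not real repo endpoints:

    import json

    repo_string = '[{"repoId": "IOP-4.2.5", "repoName": "IOP", "baseUrl": "http://repo.example.com/IOP/rhel7/4.2.5"}]'
    repo_dicts = json.loads(repo_string)
    if not isinstance(repo_dicts, list):  # a single object is tolerated, as in _alter_repo
        repo_dicts = [repo_dicts]
    for repo in repo_dicts:
        repo.setdefault("baseUrl", None)
        repo.setdefault("mirrorsList", None)
        print(repo["repoId"], "->", repo["baseUrl"])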

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/shared_initialization.py
new file mode 100755
index 0000000..3c3301c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions import stack_tools
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.core.resources.packaging import Package
+
+def install_packages():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  packages = ['unzip', 'curl']
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '4.0') >= 0:
+    stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
+    packages.append(stack_selector_package)
+  Package(packages,
+          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+          retry_count=params.agent_stack_retry_count)

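The version gate in install_packages relies on compare_versions returning a negative, zero, or positive value, cmp-style. A rough standalone equivalent (the real helper lives in resource_management and also handles build suffixes; this sketch only covers dotted numeric versions, and the selector package name is hypothetical):

    def compare_versions(v1, v2):
        """Compare dotted numeric versions cmp-style, mirroring how the
        hook decides whether to add the stack selector package."""
        t1 = [int(x) for x in v1.split('.')]
        t2 = [int(x) for x in v2.split('.')]
        return cmp(t1, t2)   # Python 2 builtin, as used throughout the stack scripts

    packages = ['unzip', 'curl']
    stack_version_formatted = "4.2.5"   # example value
    if stack_version_formatted != "" and compare_versions(stack_version_formatted, '4.0') >= 0:
        packages.append('distro-select')   # hypothetical selector package name
    print packages   # ['unzip', 'curl', 'distro-select']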
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-RESTART/scripts/hook.py
new file mode 100755
index 0000000..14b9d99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-RESTART/scripts/hook.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class BeforeRestartHook(Hook):
+
+  def hook(self, env):
+    self.run_custom_hook('before-START')
+
+if __name__ == "__main__":
+  BeforeRestartHook().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/checkForFormat.sh
new file mode 100755
index 0000000..68aa96d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/checkForFormat.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export bin_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
+  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
+    (( EXIT_CODE = $EXIT_CODE | $? ))
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+

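checkForFormat.sh takes positional arguments (HDFS user, conf dir, bin dir, mark dir, then a comma-separated list of name dirs) and exits non-zero when a name dir is non-empty or the format fails. A hedged sketch of how a caller might drive it; the paths and user below are illustrative, not the hook's actual values:

    import subprocess

    # Illustrative arguments only -- the real hook passes cluster-specific values.
    cmd = [
        "bash", "/var/lib/ambari-agent/cache/checkForFormat.sh",
        "hdfs",                              # hdfs_user
        "/etc/hadoop/conf",                  # conf_dir
        "/usr/bin",                          # bin_dir
        "/var/lib/hdfs/namenode/formatted",  # mark_dir
        "/hadoop/hdfs/namenode",             # name_dirs (comma-separated list)
    ]
    rc = subprocess.call(cmd)
    # Non-zero means a non-empty name dir was found or the format itself failed.
    if rc != 0:
        print "checkForFormat.sh exited with %d; NameNode was not formatted" % rc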
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/fast-hdfs-resource.jar b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/fast-hdfs-resource.jar
new file mode 100755
index 0000000..6ea0873
Binary files /dev/null and b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/fast-hdfs-resource.jar differ

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/task-log4j.properties
new file mode 100755
index 0000000..7e12962
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,134 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+ 
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/topology_script.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/topology_script.py
new file mode 100755
index 0000000..0f7a55c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/files/topology_script.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import sys, os
+from string import join
+import ConfigParser
+
+
+DEFAULT_RACK = "/default-rack"
+DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
+SECTION_NAME = "network_topology"
+
+class TopologyScript():
+
+  def load_rack_map(self):
+    try:
+      #RACK_MAP contains both host name vs rack and ip vs rack mappings
+      mappings = ConfigParser.ConfigParser()
+      mappings.read(DATA_FILE_NAME)
+      return dict(mappings.items(SECTION_NAME))
+    except ConfigParser.NoSectionError:
+      return {}
+
+  def get_racks(self, rack_map, args):
+    if len(args) == 1:
+      return DEFAULT_RACK
+    else:
+      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]])
+
+  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
+    #try looking up by hostname
+    rack = rack_map.get(hostname_or_ip)
+    if rack is not None:
+      return rack
+    #try looking up by ip
+    rack = rack_map.get(self.extract_ip(hostname_or_ip))
+    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
+    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
+
+  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
+  def extract_ip(self, container_string):
+    return container_string.split("/")[0].split(":")[0]
+
+  def execute(self, args):
+    rack_map = self.load_rack_map()
+    rack = self.get_racks(rack_map, args)
+    print rack
+
+if __name__ == "__main__":
+  TopologyScript().execute(sys.argv)

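The topology_mappings.data file the script reads next to itself is a plain INI file with a single [network_topology] section mapping both hostnames and IPs to racks. A small self-contained demo of the lookup behavior (the mapping data here is made up):

    import ConfigParser, StringIO

    # Made-up mapping data in the same shape the script's ConfigParser expects.
    sample = """[network_topology]
    host1.example.com=/rack-1
    192.168.1.10=/rack-1
    host2.example.com=/rack-2
    """

    parser = ConfigParser.ConfigParser()
    parser.readfp(StringIO.StringIO(sample))
    rack_map = dict(parser.items("network_topology"))

    print rack_map.get("host1.example.com", "/default-rack")  # /rack-1
    print rack_map.get("unknown-host", "/default-rack")       # /default-rack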

[15/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/YARN_widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/YARN_widgets.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/YARN_widgets.json
new file mode 100755
index 0000000..fedee4d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/YARN_widgets.json
@@ -0,0 +1,617 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_yarn_dashboard",
+      "display_name": "Standard YARN Dashboard",
+      "section_name": "YARN_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Memory Utilization",
+          "description": "Percentage of total memory allocated to containers running in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AllocatedMB",
+              "metric_path": "metrics/yarn/Queue/root/AllocatedMB",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AvailableMB",
+              "metric_path": "metrics/yarn/Queue/root/AvailableMB",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Memory Utilization",
+              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedMB / (yarn.QueueMetrics.Queue=root.AllocatedMB + yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "CPU Utilization",
+          "description": "Percentage of total virtual cores allocated to containers running in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores",
+              "metric_path": "metrics/yarn/Queue/root/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AvailableVCores",
+              "metric_path": "metrics/yarn/Queue/root/AvailableVCores",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable CPU Utilized across NodeManager",
+              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Container Failures",
+          "description": "Percentage of all containers failing in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersFailed._sum",
+              "metric_path": "metrics/yarn/ContainersFailed._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted._sum",
+              "metric_path": "metrics/yarn/ContainersCompleted._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched._sum",
+              "metric_path": "metrics/yarn/ContainersLaunched._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersIniting._sum",
+              "metric_path": "metrics/yarn/ContainersIniting._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersKilled._sum",
+              "metric_path": "metrics/yarn/ContainersKilled._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersRunning._sum",
+              "metric_path": "metrics/yarn/ContainersRunning._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Container Failures",
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._sum/(yarn.NodeManagerMetrics.ContainersFailed._sum + yarn.NodeManagerMetrics.ContainersCompleted._sum + yarn.NodeManagerMetrics.ContainersLaunched._sum + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._sum + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "App Failures",
+          "description": "Percentage of all launched applications failing in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsFailed",
+              "metric_path": "metrics/yarn/Queue/root/AppsFailed",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsKilled",
+              "metric_path": "metrics/yarn/Queue/root/AppsKilled",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
+              "metric_path": "metrics/yarn/Queue/root/AppsPending",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsRunning",
+              "metric_path": "metrics/yarn/Queue/root/AppsRunning",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted",
+              "metric_path": "metrics/yarn/Queue/root/AppsSubmitted",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsCompleted",
+              "metric_path": "metrics/yarn/Queue/root/AppsCompleted",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "App Failures",
+              "value": "${(yarn.QueueMetrics.Queue=root.AppsFailed/(yarn.QueueMetrics.Queue=root.AppsFailed + yarn.QueueMetrics.Queue=root.AppsKilled + yarn.QueueMetrics.Queue=root.AppsPending + yarn.QueueMetrics.Queue=root.AppsRunning + yarn.QueueMetrics.Queue=root.AppsSubmitted + yarn.QueueMetrics.Queue=root.AppsCompleted)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Pending Apps",
+          "description": "Count of applications waiting for cluster resources to become available.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
+              "metric_path": "metrics/yarn/Queue/root/AppsPending",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Pending Apps",
+              "value": "${yarn.QueueMetrics.Queue=root.AppsPending}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Apps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Memory",
+          "description": "Percentage of memory used across all NodeManager hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "mem_total._sum",
+              "metric_path": "metrics/memory/mem_total._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "mem_free._sum",
+              "metric_path": "metrics/memory/mem_free._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "mem_cached._sum",
+              "metric_path": "metrics/memory/mem_cached._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total._sum - mem_free._sum - mem_cached._sum)/mem_total._sum) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Disk",
+          "description": "Sum of disk throughput for all NodeManager hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "read_bps._sum",
+              "metric_path": "metrics/disk/read_bps._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "write_bps._sum",
+              "metric_path": "metrics/disk/write_bps._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Read throughput",
+              "value": "${read_bps._sum/1048576}"
+            },
+            {
+              "name": "Write throughput",
+              "value": "${write_bps._sum/1048576}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Mbps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Network",
+          "description": "Average of Network utilized across all NodeManager hosts.",
+          "default_section_name": "YARN_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "pkts_in._avg",
+              "metric_path": "metrics/network/pkts_in._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "pkts_out._avg",
+              "metric_path": "metrics/network/pkts_out._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Packets In",
+              "value": "${pkts_in._avg}"
+            },
+            {
+              "name": "Packets Out",
+              "value": "${pkts_out._avg}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster CPU",
+          "description": "Percentage of CPU utilized across all NodeManager hosts.",
+          "default_section_name": "YARN_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system._sum",
+              "metric_path": "metrics/cpu/cpu_system._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_user._sum",
+              "metric_path": "metrics/cpu/cpu_user._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_nice._sum",
+              "metric_path": "metrics/cpu/cpu_nice._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_idle._sum",
+              "metric_path": "metrics/cpu/cpu_idle._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_wio._sum",
+              "metric_path": "metrics/cpu/cpu_wio._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_yarn_heatmap",
+      "display_name": "YARN Heatmaps",
+      "section_name": "YARN_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Total Allocatable RAM Utilized per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "metric_path": "metrics/yarn/AllocatedGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.AvailableGB",
+              "metric_path": "metrics/yarn/AvailableGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable RAM Utilized per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Total Allocatable CPU Utilized per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "metric_path": "metrics/yarn/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.AvailableVCores",
+              "metric_path": "metrics/yarn/AvailableVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable CPU Utilized per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Container Failures",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersFailed",
+              "metric_path": "metrics/yarn/ContainersFailed",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted",
+              "metric_path": "metrics/yarn/ContainersCompleted",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched",
+              "metric_path": "metrics/yarn/ContainersLaunched",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersIniting",
+              "metric_path": "metrics/yarn/ContainersIniting",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersKilled",
+              "metric_path": "metrics/yarn/ContainersKilled",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersRunning",
+              "metric_path": "metrics/yarn/ContainersRunning",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Container Failures",
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed/(yarn.NodeManagerMetrics.ContainersFailed + yarn.NodeManagerMetrics.ContainersCompleted + yarn.NodeManagerMetrics.ContainersLaunched + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager GC Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager Garbage Collection Time",
+              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "NodeManager JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager JVM Heap Memory Used",
+              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "Allocated Containers",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "metric_path": "metrics/yarn/AllocatedContainers",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Allocated Containers",
+              "value": "${yarn.NodeManagerMetrics.AllocatedContainers}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager RAM Utilized",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "metric_path": "metrics/yarn/AllocatedGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager RAM Utilized",
+              "value": "${yarn.NodeManagerMetrics.AllocatedGB}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager CPU Utilized",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "metric_path": "metrics/yarn/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager CPU Utilized",
+              "value": "${yarn.NodeManagerMetrics.AllocatedVCores}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}

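The ${...} strings in the widget definitions above are arithmetic expressions over the listed metric names, evaluated by Ambari when the widget is rendered. As a sanity check, the Memory Utilization expression reduces to allocated / (allocated + available); for example (metric samples below are illustrative, not real cluster values):

    allocated_mb = 6144.0   # yarn.QueueMetrics.Queue=root.AllocatedMB
    available_mb = 2048.0   # yarn.QueueMetrics.Queue=root.AvailableMB

    memory_utilization = (allocated_mb / (allocated_mb + available_mb)) * 100
    print "%.1f%%" % memory_utilization   # 75.0%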
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/alerts.json
new file mode 100755
index 0000000..3338f59
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/alerts.json
@@ -0,0 +1,398 @@
+{
+  "MAPREDUCE2": {
+    "service": [],
+    "HISTORYSERVER": [
+      {
+        "name": "mapreduce_history_server_webui",
+        "label": "History Server Web UI",
+        "description": "This host-level alert is triggered if the History Server Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
+            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
+            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}"
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "mapreduce_history_server_cpu",
+        "label": "History Server CPU Utilization",
+        "description": "This host-level alert is triggered if the percent of CPU utilization on the History Server exceeds the configured critical threshold. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
+            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
+            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
+            "https_property_value": "HTTPS_ONLY"
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
+      {
+        "name": "mapreduce_history_server_rpc_latency",
+        "label": "History Server RPC Latency",
+        "description": "This host-level alert is triggered if the History Server operations RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for operations. The threshold values are in milliseconds.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
+            "https": "{{mapred-site/mapreduce.jobhistory.webapp.https.address}}",
+            "https_property": "{{mapred-site/mapreduce.jobhistory.http.policy}}",
+            "https_property_value": "HTTPS_ONLY"
+          },
+          "reporting": {
+            "ok": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
+            },
+            "warning": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 3000
+            },
+            "critical": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 5000
+            },
+            "units" : "ms"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=JobHistoryServer,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
+              "Hadoop:service=JobHistoryServer,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "mapreduce_history_server_process",
+        "label": "History Server Process",
+        "description": "This host-level alert is triggered if the History Server process cannot be established to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{mapred-site/mapreduce.jobhistory.webapp.address}}",
+          "default_port": 19888,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  },
+  "YARN": {
+    "service": [
+      {
+        "name": "yarn_nodemanager_webui_percent",
+        "label": "Percent NodeManagers Available",
+        "description": "This alert is triggered if the number of down NodeManagers in the cluster is greater than the configured critical threshold. It aggregates the results of NodeManager process checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "yarn_nodemanager_webui",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.1
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.3
+            }
+          }
+        }
+      }
+    ],
+    "NODEMANAGER": [
+      {
+        "name": "yarn_nodemanager_webui",
+        "label": "NodeManager Web UI",
+        "description": "This host-level alert is triggered if the NodeManager Web UI is unreachable.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{yarn-site/yarn.nodemanager.webapp.address}}",
+            "https": "{{yarn-site/yarn.nodemanager.webapp.https.address}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "default_port": 8042,
+            "kerberos_keytab": "{{yarn-site/yarn.nodemanager.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{yarn-site/yarn.nodemanager.webapp.spnego-principal}}"
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "yarn_nodemanager_health",
+        "label": "NodeManager Health",
+        "description": "This host-level alert checks the node health property available from the NodeManager component.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py",
+          "parameters": [
+            {
+              "name": "connection.timeout",
+              "display_name": "Connection Timeout",
+              "value": 5.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before this alert is considered to be CRITICAL",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
+    ],
+    "RESOURCEMANAGER": [
+      {
+        "name": "yarn_resourcemanager_webui",
+        "label": "ResourceManager Web UI",
+        "description": "This host-level alert is triggered if the ResourceManager Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
+            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}",
+            "kerberos_principal": "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}",
+            "high_availability": {
+              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
+              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
+              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "yarn_resourcemanager_cpu",
+        "label": "ResourceManager CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the ResourceManager exceeds certain warning and critical thresholds. It checks the ResourceManager JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
+            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "high_availability": {
+              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
+              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
+              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
+      {
+        "name": "yarn_resourcemanager_rpc_latency",
+        "label": "ResourceManager RPC Latency",
+        "description": "This host-level alert is triggered if the ResourceManager operations RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for ResourceManager operations. The threshold values are in milliseconds.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{yarn-site/yarn.resourcemanager.webapp.address}}",
+            "https": "{{yarn-site/yarn.resourcemanager.webapp.https.address}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "high_availability": {
+              "alias_key" : "{{yarn-site/yarn.resourcemanager.ha.rm-ids}}",
+              "http_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.address.{{alias}}}}",
+              "https_pattern" : "{{yarn-site/yarn.resourcemanager.webapp.https.address.{{alias}}}}"
+            }
+          },
+          "reporting": {
+            "ok": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
+            },
+            "warning": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 3000
+            },
+            "critical": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 5000
+            },
+            "units" : "ms"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=ResourceManager,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
+              "Hadoop:service=ResourceManager,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "nodemanager_health_summary",
+        "label": "NodeManager Health Summary",
+        "description": "This service-level alert is triggered if there are unhealthy NodeManagers",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "YARN/2.1.0.2.0/package/alerts/alert_nodemanagers_summary.py",
+          "parameters": [
+            {
+              "name": "connection.timeout",
+              "display_name": "Connection Timeout",
+              "value": 5.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before this alert is considered to be CRITICAL",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
+    ],
+    "APP_TIMELINE_SERVER": [
+      {
+        "name": "yarn_app_timeline_server_webui",
+        "label": "App Timeline Web UI",
+        "description": "This host-level alert is triggered if the App Timeline Server Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{yarn-site/yarn.timeline-service.webapp.address}}",
+            "https": "{{yarn-site/yarn.timeline-service.webapp.https.address}}",
+            "https_property": "{{yarn-site/yarn.http.policy}}",
+            "https_property_value": "HTTPS_ONLY",
+            "kerberos_keytab": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{yarn-site/yarn.timeline-service.http-authentication.kerberos.principal}}"
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

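For the AGGREGATE alert above (yarn_nodemanager_webui_percent), the 0.1 and 0.3 warning/critical values are fractions of affected NodeManagers, not percentages. A quick sketch of how such a ratio trips the thresholds; the threshold handling here is a simplified reading of the reporting block, not Ambari's actual alert engine:

    def aggregate_state(affected, total, warn=0.1, crit=0.3):
        """Map an affected/total ratio onto OK/WARNING/CRITICAL,
        following the reporting block in the alert definition."""
        ratio = float(affected) / total if total else 0.0
        if ratio >= crit:
            return "CRITICAL"
        elif ratio >= warn:
            return "WARNING"
        return "OK"

    print aggregate_state(0, 20)   # OK
    print aggregate_state(3, 20)   # WARNING  (0.15 >= 0.1)
    print aggregate_state(7, 20)   # CRITICAL (0.35 >= 0.3)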
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-env.xml
new file mode 100755
index 0000000..11676d9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-env.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>mapred_log_dir_prefix</name>
+    <display-name>Mapreduce Log Dir Prefix</display-name>
+    <value>/var/log/hadoop-mapreduce</value>
+    <description>Mapreduce Log Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>mapred_pid_dir_prefix</name>
+    <display-name>Mapreduce PID Dir Prefix</display-name>
+    <value>/var/run/hadoop-mapreduce</value>
+    <description>Mapreduce PID Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>mapred_user</name>
+    <value>mapred</value>
+    <property-type>USER</property-type>
+    <description>Mapreduce User</description>
+  </property>
+  <property>
+    <name>jobhistory_heapsize</name>
+    <display-name>History Server heap size</display-name>
+    <value>900</value>
+    <description>Value for JobHistoryServer heap_size variable in hadoop-env.sh</description>
+    <value-attributes>
+      <unit>MB</unit>
+      <type>int</type>
+    </value-attributes>
+  </property>
+
+  <!-- mapred-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for mapred-env.sh file</description>
+    <value>
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+
+export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}
+
+export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
+
+#export HADOOP_JOB_HISTORYSERVER_OPTS=
+#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
+#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
+#export HADOOP_MAPRED_PID_DIR= # Where the pid files are stored. /tmp by default.
+#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
+#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.
+    </value>
+  </property>
+</configuration>
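The "content" property above is a Jinja2 template that Ambari renders with the other mapred-env values before writing mapred-env.sh to each host. A minimal sketch of that substitution, using the standalone jinja2 package rather than Ambari's own resource_management templating (an assumption made purely for illustration):

    # Illustrative only: mimic how the mapred-env.sh template picks up
    # jobhistory_heapsize from this same config type (default 900 MB).
    from jinja2 import Template

    mapred_env_template = (
        "export HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n"
        "export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n"
    )

    rendered = Template(mapred_env_template).render(jobhistory_heapsize=900)
    print(rendered)
    # export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=900
    # export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA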

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-site.xml
new file mode 100755
index 0000000..4f39d21
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration-mapred/mapred-site.xml
@@ -0,0 +1,479 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>mapreduce.task.io.sort.mb</name>
+    <value>512</value>
+    <description>
+      The total amount of buffer memory to use while sorting files, in megabytes.
+      By default, gives each merge stream 1MB, which should minimize seeks.
+    </description>
+    <display-name>Sort Allocation Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2047</maximum>
+      <unit>MB</unit>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>mapreduce.map.memory.mb</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>mapreduce.map.sort.spill.percent</name>
+    <value>0.7</value>
+    <description>
+      The soft limit in the serialization buffer. Once reached, a thread will
+      begin to spill the contents to disk in the background. Note that
+      collection will not block if this threshold is exceeded while a spill
+      is already in progress, so spills may be larger than this threshold when
+      it is set to less than 0.5.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.task.io.sort.factor</name>
+    <value>100</value>
+    <description>
+      The number of streams to merge at once while sorting files.
+      This determines the number of open file handles.
+    </description>
+  </property>
+
+<!-- map/reduce properties -->
+  <property>
+    <name>mapreduce.cluster.administrators</name>
+    <value> hadoop</value>
+    <description>
+      Administrators for MapReduce applications.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.shuffle.parallelcopies</name>
+    <value>30</value>
+    <description>
+      The default number of parallel transfers run by reduce during
+      the copy(shuffle) phase.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.speculative</name>
+    <value>false</value>
+    <description>
+      If true, then multiple instances of some map tasks
+      may be executed in parallel.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.speculative</name>
+    <value>false</value>
+    <description>
+      If true, then multiple instances of some reduce tasks may be
+      executed in parallel.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
+    <value>0.05</value>
+    <description>
+      Fraction of the number of maps in the job which should be complete before
+      reduces are scheduled for the job.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.job.counters.max</name>
+    <value>130</value>
+    <description>
+      Limit on the number of counters allowed per job.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.shuffle.merge.percent</name>
+    <value>0.66</value>
+    <description>
+      The usage threshold at which an in-memory merge will be
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapreduce.reduce.shuffle.input.buffer.percent.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
+    <value>0.7</value>
+    <description>
+      The percentage of memory to be allocated from the maximum heap
+      size to storing map outputs during the shuffle.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress.type</name>
+    <value>BLOCK</value>
+    <description>
+      If the job outputs are to be compressed as SequenceFiles, how should
+      they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.input.buffer.percent</name>
+    <value>0.0</value>
+    <description>
+      The percentage of memory, relative to the maximum heap size, to
+      retain map outputs during the reduce. When the shuffle is concluded, any
+      remaining map outputs in memory must consume less than this threshold before
+      the reduce can begin.
+    </description>
+  </property>
+
+  <!-- copied from kryptonite configuration -->
+  <property>
+    <name>mapreduce.map.output.compress</name>
+    <value>false</value>
+    <description>
+      Should the outputs of the maps be compressed before being sent across the network? Uses SequenceFile compression.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.task.timeout</name>
+    <value>300000</value>
+    <description>
+      The number of milliseconds before a task will be
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.memory.mb</name>
+    <value>1024</value>
+    <description>Virtual memory for single Map task</description>
+    <display-name>Map Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>250</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+      </property>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.memory.mb</name>
+    <value>1024</value>
+    <description>Virtual memory for single Reduce task</description>
+    <display-name>Reduce Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>250</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+      </property>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>mapreduce.shuffle.port</name>
+    <value>13562</value>
+    <description>
+      Default port that the ShuffleHandler will run on.
+      ShuffleHandler is a service run at the NodeManager to facilitate
+      transfers of intermediate Map outputs to requesting Reducers.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>/mr-history/tmp</value>
+    <description>
+      Directory where history files are written by MapReduce jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>/mr-history/done</value>
+    <description>
+      Directory where history files are managed by the MR JobHistory Server.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.address</name>
+    <value>localhost:10020</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.webapp.address</name>
+    <value>localhost:19888</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.framework.name</name>
+    <value>yarn</value>
+    <description>
+      The runtime framework for executing MapReduce jobs. Can be one of local,
+      classic or yarn.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.staging-dir</name>
+    <value>/user</value>
+    <description>
+      The staging dir used while submitting jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.resource.mb</name>
+    <value>512</value>
+    <description>The amount of memory the MR AppMaster needs.</description>
+    <display-name>AppMaster Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>250</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+      </property>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.command-opts</name>
+    <value>-Xmx312m -Diop.version=${iop.version}</value>
+    <description>
+      Java opts for the MR App Master processes.
+      The following symbol, if present, will be interpolated: @taskid@ is replaced
+      by the current TaskID. Any other occurrences of '@' will go unchanged.
+      For example, to enable verbose gc logging to a file named for the taskid in
+      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
+      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+    <display-name>MR AppMaster Java Heap Size</display-name>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>yarn.app.mapreduce.am.resource.mb</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.admin-command-opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN -Diop.version=${iop.version}</value>
+    <description>
+      Java opts for the MR App Master processes for admin purposes.
+      It will appear before the opts set by yarn.app.mapreduce.am.command-opts and
+      thus its options can be overridden by the user.
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+    <display-name>MR AppMaster Java Heap Size</display-name>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>yarn.app.mapreduce.am.resource.mb</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.log.level</name>
+    <value>INFO</value>
+    <description>MR App Master process log level.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.map.child.java.opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN -Diop.version=${iop.version}</value>
+    <description>This property stores Java options for map tasks.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.reduce.child.java.opts</name>
+    <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN -Diop.version=${iop.version}</value>
+    <description>This property stores Java options for reduce tasks.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>/etc/hadoop/conf/:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:/usr/iop/${iop.version}/hadoop/lib/hadoop-lzo-0.5.1.jar:/etc/hadoop/conf/secure</value>
+    <description>
+      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
+      entries.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.application.framework.path</name>
+    <value>/iop/apps/${iop.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
+    <description></description>
+  </property>
+
+
+  <property>
+    <name>mapreduce.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It is an
+      application-specific setting. It should not be larger than the global number
+      set by the resourcemanager. Otherwise, it will be overridden. The default number is
+      set to 2, to allow at least one retry for the AM.
+    </description>
+  </property>
+
+
+
+  <property>
+    <name>mapreduce.map.java.opts</name>
+    <value>-Xmx756m</value>
+    <description>
+      Larger heap-size for child jvms of maps.
+    </description>
+    <display-name>MR Map Java Heap Size</display-name>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>mapreduce.map.memory.mb</name>
+      </property>
+    </depends-on>
+  </property>
+
+
+  <property>
+    <name>mapreduce.reduce.java.opts</name>
+    <value>-Xmx756m</value>
+    <description>
+      Larger heap-size for child jvms of reduces.
+    </description>
+    <display-name>MR Reduce Java Heap Size</display-name>
+    <depends-on>
+      <property>
+        <type>mapred-site</type>
+        <name>mapreduce.reduce.memory.mb</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>mapreduce.map.log.level</name>
+    <value>INFO</value>
+    <description>
+      The logging level for the map task. The allowed levels are:
+      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.log.level</name>
+    <value>INFO</value>
+    <description>
+      The logging level for the reduce task. The allowed levels are:
+      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.user.env</name>
+    <value>LD_LIBRARY_PATH=/usr/iop/${iop.version}/hadoop/lib/native</value>
+    <description>
+      Additional execution environment entries for map and reduce task processes.
+      This is not an additive property. You must preserve the original value if
+      you want your map and reduce tasks to have access to native libraries (compression, etc.).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.env</name>
+    <value>LD_LIBRARY_PATH=/usr/iop/${iop.version}/hadoop/lib/native</value>
+    <description>
+      User added environment variables for the MR App Master processes.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress</name>
+    <value>false</value>
+    <description>
+      Should the job outputs be compressed?
+    </description>
+  </property>
+
+</configuration>
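The shipped defaults above tie the child JVM heap (-Xmx756m in mapreduce.map.java.opts and mapreduce.reduce.java.opts) to the 1024 MB container sizes in mapreduce.map.memory.mb and mapreduce.reduce.memory.mb, roughly a 0.75 heap-to-container ratio. A small sketch of that heuristic, which is only a rule of thumb and not necessarily the exact formula Ambari's stack advisor applies:

    # Heuristic only: derive a child -Xmx value from the YARN container size.
    # The 0.75 factor approximates the shipped defaults (-Xmx756m for 1024 MB).
    def child_java_opts(container_mb, heap_fraction=0.75):
        heap_mb = int(container_mb * heap_fraction)
        return "-Xmx{}m".format(heap_mb)

    print(child_java_opts(1024))  # -Xmx768m, close to the shipped default of -Xmx756m
    print(child_java_opts(2048))  # -Xmx1536m, a value to pair with a 2 GB container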

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/capacity-scheduler.xml
new file mode 100755
index 0000000..35404c6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/capacity-scheduler.xml
@@ -0,0 +1,157 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.2</value>
+    <description>
+      Maximum percent of resources in the cluster which can be used to run
+      application masters, i.e. it controls the number of concurrently running
+      applications.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default</value>
+    <description>
+      The queues at this level (root is the root queue).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.capacity</name>
+    <value>100</value>
+    <description>
+      The total capacity as a percentage out of 100 for this queue.
+      If it has child queues then this includes their capacity as well.
+      The child queues' capacity should add up to their parent queue's capacity
+      or less.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>100</value>
+    <description>Default queue target capacity.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>1</value>
+    <description>
+      Default queue user limit, a percentage from 0.0 to 1.0.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.acl_administer_queue</name>
+    <value>*</value>
+    <description>
+      The ACL for who can administer this queue, i.e. change sub-queue
+      allocations.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.node-locality-delay</name>
+    <value>40</value>
+    <description>
+      Number of missed scheduling opportunities after which the CapacityScheduler
+      attempts to schedule rack-local containers.
+      Typically this should be set to the number of nodes in the cluster. By default it is set to
+      approximately the number of nodes in one rack, which is 40.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
+    <value>100</value>
+    <description>
+      Default minimum queue resource limit depends on the number of users who have submitted applications.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.resource-calculator</name>
+    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+    <display-name>CPU Scheduling</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>org.apache.hadoop.yarn.util.resource.DominantResourceCalculator</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
+    <value>*</value>
+    <description></description>
+  </property>
+
+</configuration>
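Two relationships worth keeping in mind when editing this file: child queue capacities should sum to at most their parent's capacity, and yarn.scheduler.capacity.maximum-am-resource-percent bounds the share of cluster memory that ApplicationMasters can hold. A short sketch of both checks; the 65536 MB cluster size below is a made-up example value:

    # Sketch of the sanity checks implied by the property descriptions above.
    def capacities_are_valid(child_capacities, parent_capacity=100):
        # Child queue capacities should add up to the parent's capacity or less.
        return sum(child_capacities) <= parent_capacity

    def max_am_memory_mb(cluster_memory_mb, max_am_resource_percent=0.2):
        # Upper bound on memory usable by ApplicationMasters cluster-wide.
        return int(cluster_memory_mb * max_am_resource_percent)

    print(capacities_are_valid([100]))   # True: a single "default" queue at 100
    print(max_am_memory_mb(65536))       # 13107 MB for AMs at the 0.2 default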

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-env.xml
new file mode 100755
index 0000000..95372fb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-env.xml
@@ -0,0 +1,243 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>yarn_log_dir_prefix</name>
+    <display-name>YARN Log Dir Prefix</display-name>
+    <value>/var/log/hadoop-yarn</value>
+    <description>YARN Log Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>yarn_pid_dir_prefix</name>
+    <display-name>YARN PID Dir Prefix</display-name>
+    <value>/var/run/hadoop-yarn</value>
+    <description>YARN PID Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>yarn_user</name>
+    <value>yarn</value>
+    <property-type>USER</property-type>
+    <description>YARN User</description>
+  </property>
+  <property>
+    <name>yarn_heapsize</name>
+    <display-name>YARN Java heap size</display-name>
+    <value>1024</value>
+    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+  </property>
+  <property>
+    <name>resourcemanager_heapsize</name>
+    <display-name>ResourceManager Java heap size</display-name>
+    <value>1024</value>
+    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+      <unit>MB</unit>
+    </value-attributes>
+  </property>
+  <property>
+    <name>nodemanager_heapsize</name>
+    <display-name>NodeManager Java heap size</display-name>
+    <value>1024</value>
+    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+  </property>
+  <property>
+    <name>min_user_id</name>
+    <display-name>Minimum user ID for submitting job</display-name>
+    <value>1000</value>
+    <description>Set to 0 to disallow root from submitting jobs. Set to 1000 to disallow all superusers from submitting jobs</description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>apptimelineserver_heapsize</name>
+    <display-name>AppTimelineServer Java heap size</display-name>
+    <value>1024</value>
+    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <unit>MB</unit>
+      <type>int</type>
+    </value-attributes>
+  </property>
+
+  <property>
+   <name>yarn_cgroups_enabled</name>
+   <value>false</value>
+   <description>You can use CGroups to isolate CPU-heavy processes in a Hadoop cluster.</description>
+   <display-name>CPU Isolation</display-name>
+   <value-attributes>
+     <type>value-list</type>
+     <entries>
+       <entry>
+         <value>true</value>
+         <label>Enabled</label>
+       </entry>
+       <entry>
+         <value>false</value>
+         <label>Disabled</label>
+       </entry>
+     </entries>
+     <selection-cardinality>1</selection-cardinality>
+   </value-attributes>
+  </property>
+
+  <!-- yarn-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for yarn-env.sh file</description>
+    <value>
+export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+export JAVA_HOME={{java64_home}}
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-/etc/hadoop/conf}"
+
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# For setting YARN-specific heap sizes, please use this
+# parameter and set it appropriately
+YARN_HEAPSIZE={{yarn_heapsize}}
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+
+# Resource Manager specific parameters
+
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+# Specify the max Heapsize for the HistoryManager using a numerical value
+# in the scale of MB. For example, to specify a jvm option of -Xmx1000m, set
+# the value to 1024.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_HISTORYSERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory and file
+if [ "$YARN_LOG_DIR" = "" ]; then
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+  YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+  YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+    </value>
+  </property>
+
+</configuration>
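The yarn-env.sh template above resolves heap sizes in a fixed order: a daemon-specific value such as YARN_RESOURCEMANAGER_HEAPSIZE wins, otherwise YARN_HEAPSIZE is used, otherwise the hard-coded 1000 MB JAVA_HEAP_MAX fallback applies (an explicit -Xmx in the corresponding *_OPTS variable can still override all of these). A small model of that precedence, written as illustration rather than code Ambari itself runs:

    # Illustrative model of the heap-size precedence in the template above.
    def effective_heap_mb(daemon_heapsize=None, yarn_heapsize=None, default_mb=1000):
        if daemon_heapsize:
            return int(daemon_heapsize)   # e.g. resourcemanager_heapsize
        if yarn_heapsize:
            return int(yarn_heapsize)     # cluster-wide yarn_heapsize
        return default_mb                 # JAVA_HEAP_MAX=-Xmx1000m fallback

    print(effective_heap_mb(daemon_heapsize=1024))  # daemon-specific value wins
    print(effective_heap_mb(yarn_heapsize=2048))    # falls back to YARN_HEAPSIZE
    print(effective_heap_mb())                      # 1000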

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-log4j.xml
new file mode 100755
index 0000000..8c44b9e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-log4j.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#Relative to Yarn Log Dir Prefix
+yarn.log.dir=.
+#
+# Job Summary Appender
+#
+# Use following logger to send summary to separate file defined by
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+#
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# LEVEL,RMSUMMARY in hadoop-env.sh
+
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+    </value>
+  </property>
+
+</configuration>


[31/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.13.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.13.0.oracle.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.13.0.oracle.sql
new file mode 100755
index 0000000..1654750
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.13.0.oracle.sql
@@ -0,0 +1,834 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
+    TYPE_NAME VARCHAR2(4000) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    OWNER_TYPE VARCHAR2(10) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(128) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
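The VERSION table created here is where the metastore records which schema revision it is running; schema-management tooling reads SCHEMA_VERSION from it before applying upgrades. A hedged sketch of such a check over a generic DB-API connection (the connection itself would come from whatever Oracle driver is in use, which is left as an assumption):

    # Sketch: read the recorded metastore schema version from the VERSION table.
    # "conn" is any PEP 249 (DB-API) connection to the metastore database.
    def current_schema_version(conn):
        cursor = conn.cursor()
        cursor.execute("SELECT SCHEMA_VERSION FROM VERSION")
        row = cursor.fetchone()
        return row[0] if row else None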
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+CREATE TABLE FUNCS (
+  FUNC_ID NUMBER NOT NULL,
+  CLASS_NAME VARCHAR2(4000),
+  CREATE_TIME NUMBER(10) NOT NULL,
+  DB_ID NUMBER,
+  FUNC_NAME VARCHAR2(128),
+  FUNC_TYPE NUMBER(10) NOT NULL,
+  OWNER_NAME VARCHAR2(128),
+  OWNER_TYPE VARCHAR2(10)
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+
+CREATE TABLE FUNC_RU (
+  FUNC_ID NUMBER NOT NULL,
+  RESOURCE_TYPE NUMBER(10) NOT NULL,
+  RESOURCE_URI VARCHAR2(4000),
+  INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+
+-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+
+-- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+-- Transaction and Lock Tables
+-- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+
+CREATE TABLE TXNS (
+  TXN_ID NUMBER(19) PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED NUMBER(19) NOT NULL,
+  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
+  TC_DATABASE VARCHAR2(128) NOT NULL,
+  TC_TABLE VARCHAR2(128),
+  TC_PARTITION VARCHAR2(767) NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID NUMBER(19),
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+  HL_TXNID NUMBER(19),
+  HL_DB VARCHAR2(128) NOT NULL,
+  HL_TABLE VARCHAR2(128),
+  HL_PARTITION VARCHAR2(767),
+  HL_LOCK_STATE CHAR(1) NOT NULL,
+  HL_LOCK_TYPE CHAR(1) NOT NULL,
+  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  HL_ACQUIRED_AT NUMBER(19),
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+);
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID NUMBER(19) PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START NUMBER(19),
+  CQ_RUN_AS varchar(128)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.13.0', 'Hive release version 0.13.0');
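As a quick sanity check after running the Oracle script above, the recorded schema version and the seeded counters can be read back directly. This is a minimal illustrative sketch, not part of the upstream file; it only shows what a correct run should leave behind in the tables the script just created.

-- Expect exactly one row: 1, '0.13.0', 'Hive release version 0.13.0'
SELECT VER_ID, SCHEMA_VERSION, VERSION_COMMENT FROM VERSION;

-- Each counter table is seeded with a single row holding 1
SELECT NTXN_NEXT FROM NEXT_TXN_ID;
SELECT NL_NEXT FROM NEXT_LOCK_ID;
SELECT NCQ_NEXT FROM NEXT_COMPACTION_QUEUE_ID;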

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.13.0.postgres.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.13.0.postgres.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.13.0.postgres.sql
new file mode 100755
index 0000000..7c57487
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.13.0.postgres.sql
@@ -0,0 +1,1537 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_OLD" (
+    "SD_ID" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000),
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(128)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE  "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+CREATE TABLE  "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for FUNCS
+--
+CREATE TABLE "FUNCS" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "CLASS_NAME" VARCHAR(4000),
+  "CREATE_TIME" INTEGER NOT NULL,
+  "DB_ID" BIGINT,
+  "FUNC_NAME" VARCHAR(128),
+  "FUNC_TYPE" INTEGER NOT NULL,
+  "OWNER_NAME" VARCHAR(128),
+  "OWNER_TYPE" VARCHAR(10),
+  PRIMARY KEY ("FUNC_ID")
+);
+
+--
+-- Table structure for FUNC_RU
+--
+CREATE TABLE "FUNC_RU" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "RESOURCE_TYPE" INTEGER NOT NULL,
+  "RESOURCE_URI" VARCHAR(4000),
+  "INTEGER_IDX" INTEGER NOT NULL,
+  PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
+);
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+--
+-- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
+
+--
+-- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
+
+--
+-- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+-- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNCS"
+    ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
+
+-- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNC_RU"
+    ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+
+--
+-- PostgreSQL database dump complete
+--
+
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+-- Transaction and lock tables
+-- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767) DEFAULT NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767) DEFAULT NULL,
+  HL_LOCK_STATE char(1) NOT NULL,
+  HL_LOCK_TYPE char(1) NOT NULL,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+);
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.13.0', 'Hive release version 0.13.0');


http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
new file mode 100755
index 0000000..61769f6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/etc/hive-schema-0.12.0.postgres.sql
@@ -0,0 +1,1405 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_OLD" (
+    "SD_ID" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000),
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(128)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: MASTER_KEYS, DELEGATION_TOKENS, TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE  "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+CREATE TABLE  "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+--
+-- PostgreSQL database dump complete
+--
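
For reference, a metastore schema dump like the one above is normally loaded into an already-created PostgreSQL database with psql; the database and role names here are placeholders rather than values taken from the stack configuration:

    psql -U hiveuser -d hive -f hive-schema-0.12.0.postgres.sql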

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/addMysqlUser.sh
new file mode 100755
index 0000000..80b88b1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/addMysqlUser.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldservice=$1
+mysqldbuser=$2
+mysqldbpasswd=$3
+userhost=$4
+
+# The restart (not start) is required to pick up mysql configuration changes made by sed
+# during install, in case mysql is already started. The changes are required by Hive later on.
+/var/lib/ambari-agent/ambari-sudo.sh service $mysqldservice restart
+
+echo "Adding user $mysqldbuser@% and removing users with empty name"
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"CREATE USER '$mysqldbuser'@'%' IDENTIFIED BY '$mysqldbpasswd';\""
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'%';\""
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"DELETE FROM mysql.user WHERE user='';\""
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
+/var/lib/ambari-agent/ambari-sudo.sh service $mysqldservice stop
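
addMysqlUser.sh is driven entirely by positional arguments: the mysqld service name, the Hive database user, that user's password, and a client host. An illustrative invocation with placeholder credentials (not values from the stack configs) would be:

    bash addMysqlUser.sh mysqld hive hivepassword '%'

Note that the CREATE USER and GRANT statements above always use '%', so the fourth argument is accepted but does not actually restrict the grant to that host.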

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hcatSmoke.sh
new file mode 100755
index 0000000..d1e2ff1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hcatSmoke.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+
+case "$2" in
+
+prepare)
+  hcat -e "show tables"
+  hcat -e "drop table IF EXISTS ${tablename}"
+  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
+;;
+
+cleanup)
+  hcat -e "drop table IF EXISTS ${tablename}"
+;;
+
+esac
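
hcatSmoke.sh expects a table name followed by a mode, so a typical smoke-test run is a prepare call followed by a cleanup call; the table name below is arbitrary:

    bash hcatSmoke.sh hcatsmoke_test prepare
    bash hcatSmoke.sh hcatsmoke_test cleanup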

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hiveSmoke.sh
new file mode 100755
index 0000000..f9f2020
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hiveSmoke.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
+echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hiveserver2.sql
new file mode 100755
index 0000000..99a3865
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hiveserver2.sql
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
+DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hiveserver2Smoke.sh
new file mode 100755
index 0000000..77d7b3e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/hiveserver2Smoke.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1| awk '{print}'|grep Error`
+
+if [ "x$smokeout" == "x" ]; then
+  echo "Smoke test of hiveserver2 passed"
+  exit 0
+else
+  echo "Smoke test of hiveserver2 wasnt passed"
+  echo $smokeout
+  exit 1
+fi
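
hiveserver2Smoke.sh takes a HiveServer2 JDBC URL and a SQL file to run through beeline (such as the hiveserver2.sql shown earlier); the host, port and path below are placeholders:

    bash hiveserver2Smoke.sh 'jdbc:hive2://localhost:10000' /tmp/hiveserver2.sql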

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/pigSmoke.sh
new file mode 100755
index 0000000..2e90ac0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/removeMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/removeMysqlUser.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/removeMysqlUser.sh
new file mode 100755
index 0000000..7b6d331
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/removeMysqlUser.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldservice=$1
+mysqldbuser=$2
+userhost=$3
+myhostname=$(hostname -f)
+sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
+
+$sudo_prefix service $mysqldservice start
+echo "Removing user $mysqldbuser@$userhost"
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"DROP USER '$mysqldbuser'@'$userhost';\""
+/var/lib/ambari-agent/ambari-sudo.sh su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
+$sudo_prefix service $mysqldservice stop
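
removeMysqlUser.sh mirrors the add script and takes the mysqld service name, the database user, and the host the user was created for; since the add script above creates the user at '%', a matching (placeholder) invocation is:

    bash removeMysqlUser.sh mysqld hive '%'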

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/startMetastore.sh
new file mode 100755
index 0000000..86541f0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/startMetastore.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_BIN=${HIVE_BIN:-"hive"}
+
+HIVE_CONF_DIR=$4 $HIVE_BIN --service metastore -hiveconf hive.log.file=hivemetastore.log -hiveconf hive.log.dir=$5 > $1 2> $2 &
+echo $!|cat>$3
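
The five positional arguments to startMetastore.sh are, in order: a file to receive stdout, a file to receive stderr, a file to receive the background process id, the Hive configuration directory, and the log directory. An illustrative call with placeholder paths:

    bash startMetastore.sh /var/log/hive/hivemetastore.out \
        /var/log/hive/hivemetastore.err /var/run/hive/hive.pid \
        /etc/hive/conf /var/log/hive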

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/templetonSmoke.sh
new file mode 100755
index 0000000..15489e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/files/templetonSmoke.sh
@@ -0,0 +1,106 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export ttonhost=$1
+export smoke_test_user=$2
+export templeton_port=$3
+export ttonTestScript=$4
+export smoke_user_keytab=$5
+export security_enabled=$6
+export kinit_path_local=$7
+export smokeuser_principal=$8
+export ttonurl="http://${ttonhost}:${templeton_port}/templeton/v1"
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smokeuser_principal}; "
+else
+  kinitcmd=""
+fi
+
+export no_proxy=$ttonhost
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'  $ttonurl/status 2>&1"
+retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+# try again with an explicit user.name parameter, which some newer WebHCat releases require
+if [[ "$httpExitCode" == "500" ]] ; then
+  cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'  $ttonurl/status?user.name=$smoke_test_user 2>&1"
+  retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+  httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+fi
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0
+
+#try hcat ddl command
+echo "exec=show databases;" > /tmp/show_db.post.txt
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl?user.name=${smoke_test_user} 2>&1"
+retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit  1
+fi
+
+# Skip the Pig smoke test when security is enabled
+if [[ $security_enabled == "true" ]]; then
+  echo "Templeton Pig Smoke Tests not run in secure mode"
+  exit 0
+fi
+
+#try pig query
+outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
+ttonTestOutput="/tmp/idtest.${outname}.out";
+ttonTestInput="/tmp/idtest.${outname}.in";
+ttonTestScript="idtest.${outname}.pig"
+
+echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
+echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
+echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
+
+#copy pig script to hdfs
+su -s /bin/bash - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
+
+#copy input file to hdfs
+su -s /bin/bash - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
+
+#create, copy post args file
+echo -n "file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
+
+#submit pig query
+cmd="curl --negotiate -u :  -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig?user.name=${smoke_test_user} 2>&1"
+retVal=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0
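
The smoke test reduces to an HTTP GET against the WebHCat (Templeton) status endpoint, expecting HTTP 200 and retrying once with an explicit user.name parameter; note that the early exit 0 above means the ddl and pig sections that follow it are never reached as committed. A rough Python 2 equivalent of the status check, assuming an unsecured cluster (the script uses curl --negotiate for SPNEGO, which this sketch does not attempt) and an illustrative host, port, and smoke user:

    import urllib2

    ttonhost = "webhcat-host.example.com"   # illustrative
    templeton_port = 50111                  # illustrative; use the cluster's templeton.port
    smoke_user = "ambari-qa"                # illustrative smoke test user

    base = "http://%s:%s/templeton/v1/status" % (ttonhost, templeton_port)
    for url in (base, base + "?user.name=" + smoke_user):
        try:
            code = urllib2.urlopen(url, timeout=30).getcode()
        except urllib2.HTTPError as err:
            code = err.code
        if code == 200:
            print("Templeton status check passed: %s" % url)
            break
    else:
        raise RuntimeError("Templeton status check failed")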

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat.py
new file mode 100755
index 0000000..536d6b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hcat():
+  import params
+
+  XmlConfig("hive-site.xml",
+            conf_dir = params.hive_conf_dir,
+            configurations = params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            configuration_attributes=params.config['configuration_attributes']['hive-site']
+  )
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hcat():
+  import params
+
+  Directory(params.hive_conf_dir,
+            create_parents=True,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+
+  Directory(params.hcat_conf_dir,
+            create_parents=True,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+  Directory(params.hcat_pid_dir,
+            owner=params.webhcat_user,
+            create_parents=True
+  )
+
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_client_conf_dir,
+            configurations=params.config['configurations']['hive-site'],
+            configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644)
+
+  File(format("{hcat_conf_dir}/hcat-env.sh"),
+       owner=params.hcat_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hcat_env_sh_template)
+  )
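
Two functions named hcat() can coexist above because @OsFamilyFuncImpl registers each body for an OS family and selects the right one when the function is called. A simplified, self-contained sketch of that dispatch idea (illustrative only, not the ambari_commons implementation):

    import platform

    _impls = {}

    def os_family_impl(os_family):
        """Register one implementation per OS family under the function name; dispatch at call time."""
        def register(func):
            _impls.setdefault(func.__name__, {})[os_family] = func
            def dispatch(*args, **kwargs):
                family = "winsrv" if platform.system() == "Windows" else "default"
                table = _impls[func.__name__]
                impl = table.get(family) or table["default"]
                return impl(*args, **kwargs)
            return dispatch
        return register

    @os_family_impl("winsrv")
    def hcat():
        return "windows variant"

    @os_family_impl("default")
    def hcat():
        return "linux variant"

    print(hcat())  # resolves to the variant registered for the current OS family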

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat_client.py
new file mode 100755
index 0000000..e914cf6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat_client.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hcat import hcat
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HCatClient(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hcat()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HCatClientDefault(HCatClient):
+  def get_component_name(self):
+    return "hadoop-client"
+
+
+if __name__ == "__main__":
+  HCatClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat_service_check.py
new file mode 100755
index 0000000..4d6e6f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hcat_service_check.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+from resource_management.libraries.functions import get_unique_id_and_date
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hcat_service_check():
+    import params
+    unique = get_unique_id_and_date()
+    output_file = format("{hive_apps_whs_dir}/hcatsmoke{unique}")
+    test_cmd = format("fs -test -e {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format(
+        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
+    else:
+      kinit_cmd = ""
+
+    File(format("{tmp_dir}/hcatSmoke.sh"),
+         content=StaticFile("hcatSmoke.sh"),
+         mode=0755
+    )
+
+    prepare_cmd = format("{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} prepare")
+
+    exec_path = params.execute_path
+    if params.version and params.stack_name:
+      upgrade_hive_bin = format("/usr/iop/{version}/hive/bin")
+      exec_path =  os.environ['PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin
+
+    Execute(prepare_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
+            logoutput=True)
+
+    if params.security_enabled:
+      Execute (format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+               user = params.hdfs_user,
+      )
+
+    ExecuteHadoop(test_cmd,
+                user=params.hdfs_user,
+                logoutput=True,
+                conf_dir=params.hadoop_conf_dir,
+                bin_dir=params.execute_path
+    )
+
+    cleanup_cmd = format("{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} cleanup")
+
+    Execute(cleanup_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
+            logoutput=True)
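
The check is a three-step round trip: run hcatSmoke.sh with the "prepare" argument as the smoke user (which is expected to create the hcatsmoke<unique> table), confirm from HDFS that the corresponding warehouse path now exists, then run the "cleanup" step. A condensed sketch of the same flow with plain shell calls, assuming an unsecured cluster, an illustrative unique suffix, and the default Hive warehouse location:

    import subprocess

    unique = "id_20170628"                             # illustrative; get_unique_id_and_date() supplies the real value
    table = "hcatsmoke" + unique
    warehouse_path = "/apps/hive/warehouse/" + table   # assumes the default hive_apps_whs_dir

    subprocess.check_call(["/var/lib/ambari-agent/tmp/hcatSmoke.sh", table, "prepare"])
    # 'hadoop fs -test -e' exits 0 only if the path exists, i.e. the prepare step produced the table directory
    subprocess.check_call(["hadoop", "fs", "-test", "-e", warehouse_path])
    subprocess.check_call(["/var/lib/ambari-agent/tmp/hcatSmoke.sh", table, "cleanup"])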

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive.py
new file mode 100755
index 0000000..6d3906d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive.py
@@ -0,0 +1,393 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import glob
+from urlparse import urlparse
+from resource_management import PropertiesFile
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.core.resources.system import File, Execute, Directory
+from resource_management.core.source import StaticFile, Template, DownloadSource, InlineTemplate
+from resource_management.core.shell import as_user
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.functions.format import format
+from resource_management.core.exceptions import Fail
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hive(name=None):
+  import params
+
+  XmlConfig("hive-site.xml",
+            conf_dir = params.hive_conf_dir,
+            configurations = params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            configuration_attributes=params.config['configuration_attributes']['hive-site']
+  )
+
+  if name in ["hiveserver2","metastore"]:
+    # Manually overriding service logon user & password set by the installation package
+    service_name = params.service_map[name]
+    ServiceConfig(service_name,
+                  action="change_user",
+                  username = params.hive_user,
+                  password = Script.get_password(params.hive_user))
+    Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), logoutput=True, user=params.hadoop_user)
+
+  if name == 'metastore':
+    if params.init_metastore_schema:
+      check_schema_created_cmd = format('cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
+                                        '-dbType {hive_metastore_db_type} '
+                                        '-userName {hive_metastore_user_name} '
+                                        '-passWord {hive_metastore_user_passwd!p}'
+                                        '&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"', #cmd "feature", propagate the process exit code manually
+                                        hive_bin=params.hive_bin,
+                                        hive_metastore_db_type=params.hive_metastore_db_type,
+                                        hive_metastore_user_name=params.hive_metastore_user_name,
+                                        hive_metastore_user_passwd=params.hive_metastore_user_passwd)
+      try:
+        Execute(check_schema_created_cmd)
+      except Fail:
+        create_schema_cmd = format('cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
+                                   '-dbType {hive_metastore_db_type} '
+                                   '-userName {hive_metastore_user_name} '
+                                   '-passWord {hive_metastore_user_passwd!p}',
+                                   hive_bin=params.hive_bin,
+                                   hive_metastore_db_type=params.hive_metastore_db_type,
+                                   hive_metastore_user_name=params.hive_metastore_user_name,
+                                   hive_metastore_user_passwd=params.hive_metastore_user_passwd)
+        Execute(create_schema_cmd,
+                user = params.hive_user,
+                logoutput=True
+        )
+
+  if name == "hiveserver2":
+    if params.hive_execution_engine == "tez":
+      # Init the tez app dir in hadoop
+      script_file = __file__.replace('/', os.sep)
+      cmd_file = os.path.normpath(os.path.join(os.path.dirname(script_file), "..", "files", "hiveTezSetup.cmd"))
+
+      Execute("cmd /c " + cmd_file, logoutput=True, user=params.hadoop_user)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hive(name=None):
+  import params
+
+  if name == 'hiveserver2':
+    # BigInsights 4.0.* or lower
+    if params.stack_version != "" and compare_versions(params.stack_version, "4.1.0.0") < 0:
+      params.HdfsResource(params.webhcat_apps_dir,
+                            type="directory",
+                            action="create_on_execute",
+                            owner=params.webhcat_user,
+                            mode=0755
+                          )
+
+    # Create webhcat dirs.
+    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
+      params.HdfsResource(params.hcat_hdfs_user_dir,
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.hcat_user,
+                           mode=params.hcat_hdfs_user_mode
+      )
+
+    params.HdfsResource(params.webhcat_hdfs_user_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.webhcat_user,
+                         mode=params.webhcat_hdfs_user_mode
+    )
+
+    # ****** Begin Copy Tarballs ******
+    # *********************************
+    if params.stack_version != "" and compare_versions(params.stack_version, '4.0.0.0') >= 0:
+      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
+
+    # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
+    copy_to_hdfs("pig",
+                 params.user_group,
+                 params.hdfs_user,
+                 file_mode=params.tarballs_mode,
+                 custom_source_file=params.pig_tar_source,
+                 custom_dest_file=params.pig_tar_dest_file)
+    copy_to_hdfs("hive",
+                 params.user_group,
+                 params.hdfs_user,
+                 file_mode=params.tarballs_mode,
+                 custom_source_file=params.hive_tar_source,
+                 custom_dest_file=params.hive_tar_dest_file)
+
+    wildcard_tarballs = ["sqoop", "hadoop_streaming"]
+    for tarball_name in wildcard_tarballs:
+      source_file_pattern = eval("params." + tarball_name + "_tar_source")
+      dest_dir = eval("params." + tarball_name + "_tar_dest_dir")
+
+      if source_file_pattern is None or dest_dir is None:
+        continue
+
+      source_files = glob.glob(source_file_pattern) if "*" in source_file_pattern else [source_file_pattern]
+      for source_file in source_files:
+        src_filename = os.path.basename(source_file)
+        dest_file = os.path.join(dest_dir, src_filename)
+
+        copy_to_hdfs(tarball_name,
+                     params.user_group,
+                     params.hdfs_user,
+                     file_mode=params.tarballs_mode,
+                     custom_source_file=source_file,
+                     custom_dest_file=dest_file)
+
+    # ******* End Copy Tarballs *******
+    # *********************************
+
+    params.HdfsResource(params.hive_apps_whs_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hive_user,
+                         group=params.user_group,
+                         mode=0770
+    )
+    # Create Hive User Dir
+    params.HdfsResource(params.hive_hdfs_user_dir,
+                         type="directory",
+                          action="create_on_execute",
+                          owner=params.hive_user,
+                          mode=params.hive_hdfs_user_mode
+    )
+
+    # hive.exec.scratchdir should be created as hive_user,
+    # otherwise hive.start.cleanup.scratchdir won't work, because Ambari always starts the Hive services as hive_user
+    if not is_empty(params.hive_exec_scratchdir):
+      params.HdfsResource(params.hive_exec_scratchdir,
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.hive_user,
+                           group=params.hdfs_user,
+                           mode=0777) # Hive expects this dir to be writeable by everyone as it is used as a temp dir
+
+    params.HdfsResource(None, action="execute")
+
+  Directory(params.hive_etc_dir_prefix,
+            mode=0755
+  )
+
+  # We should change configurations for the client as well as for the server.
+  # The reason is that stale-config detection is service-level, not per-component.
+  for conf_dir in params.hive_conf_dirs_list:
+    fill_conf_dir(conf_dir)
+
+  if name == "client":
+    permissions = 0644
+  else:
+    permissions = 0660
+
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_config_dir,
+            configurations=params.hive_site_config,
+            configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=permissions)
+
+  if params.hive_specific_configs_supported and name == 'hiveserver2':
+    XmlConfig("hiveserver2-site.xml",
+              conf_dir=params.hive_server_conf_dir,
+              configurations=params.config['configurations']['hiveserver2-site'],
+              configuration_attributes=params.config['configuration_attributes']['hiveserver2-site'],
+              owner=params.hive_user,
+              group=params.user_group,
+              mode=0644)
+
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hive_env_sh_template)
+  )
+
+  # On some OSes this folder may not exist, so create it before placing files in it
+  Directory(params.limits_conf_dir,
+            create_parents=True,
+            owner='root',
+            group='root'
+            )
+
+  File(os.path.join(params.limits_conf_dir, 'hive.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hive.conf.j2")
+       )
+
+  if name == 'metastore' or name == 'hiveserver2':
+    jdbc_connector()
+
+  File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
+       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
+       mode = 0644,
+  )
+
+  if name == 'metastore':
+    File(params.start_metastore_path,
+         mode=0755,
+         content=StaticFile('startMetastore.sh')
+    )
+    if params.init_metastore_schema:
+      create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
+                                 "{hive_bin}/schematool -initSchema "
+                                 "-dbType {hive_metastore_db_type} "
+                                 "-userName {hive_metastore_user_name} "
+                                 "-passWord {hive_metastore_user_passwd!p}")
+
+      check_schema_created_cmd = as_user(format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
+                                        "{hive_bin}/schematool -info "
+                                        "-dbType {hive_metastore_db_type} "
+                                        "-userName {hive_metastore_user_name} "
+                                        "-passWord {hive_metastore_user_passwd!p}"), params.hive_user)
+
+      Execute(create_schema_cmd,
+              not_if = check_schema_created_cmd,
+              user = params.hive_user
+      )
+  elif name == 'hiveserver2':
+    File(params.start_hiveserver2_path,
+         mode=0755,
+         content=Template(format('{start_hiveserver2_script}'))
+    )
+
+  if name != "client":
+    crt_directory(params.hive_pid_dir)
+    crt_directory(params.hive_log_dir)
+    crt_directory(params.hive_var_lib)
+
+def fill_conf_dir(component_conf_dir):
+  import params
+
+  Directory(component_conf_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            create_parents=True
+  )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=component_conf_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644)
+
+
+  crt_file(format("{component_conf_dir}/hive-default.xml.template"))
+  crt_file(format("{component_conf_dir}/hive-env.sh.template"))
+
+  log4j_exec_filename = 'hive-exec-log4j.properties'
+  if (params.log4j_exec_props != None):
+    File(format("{component_conf_dir}/{log4j_exec_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=params.log4j_exec_props
+    )
+  elif (os.path.exists("{component_conf_dir}/{log4j_exec_filename}.template")):
+    File(format("{component_conf_dir}/{log4j_exec_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
+    )
+
+  log4j_filename = 'hive-log4j.properties'
+  if (params.log4j_props != None):
+    File(format("{component_conf_dir}/{log4j_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=params.log4j_props
+    )
+  elif (os.path.exists("{component_conf_dir}/{log4j_filename}.template")):
+    File(format("{component_conf_dir}/{log4j_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
+    )
+
+def crt_directory(name):
+  import params
+
+  Directory(name,
+            create_parents=True,
+            cd_access='a',
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+
+def crt_file(name):
+  import params
+
+  File(name,
+       owner=params.hive_user,
+       group=params.user_group
+  )
+
+def jdbc_connector():
+  import params
+
+  if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
+    environment = {
+      "no_proxy": format("{ambari_server_hostname}")
+    }
+
+    # TODO: should be removed after ranger_hive_plugin will not provide jdbc
+    Execute(('rm', '-f', params.prepackaged_ojdbc_symlink),
+            path=["/bin", "/usr/bin/"],
+            sudo = True)
+
+    File(params.downloaded_custom_connector,
+         content = DownloadSource(params.driver_curl_source),
+    )
+
+    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.target),
+            #creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
+            path=["/bin", "/usr/bin/"],
+            sudo = True)
+
+  else:
+    #for default hive db (Mysql)
+    Execute(('cp', '--remove-destination', format('/usr/share/java/{jdbc_jar_name}'), params.target),
+            #creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
+            path=["/bin", "/usr/bin/"],
+            sudo=True
+    )
+
+  File(params.target,
+       mode = 0644,
+  )
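
For the metastore case above, the two schematool commands act as a guarded pair: the -info probe is used as the not_if condition, so -initSchema runs only when the schema is not yet present. An illustrative rendering of both commands with example values substituted (the !p marker in the diff masks the password in logs; plain placeholders are used here):

    # Example values only; the real ones come from params.py.
    hive_server_conf_dir = "/etc/hive/conf.server"
    hive_bin = "/usr/iop/current/hive-client/bin"
    db_type, db_user, db_password = "mysql", "hive", "hivepassword"

    create_schema_cmd = ("export HIVE_CONF_DIR=%s ; %s/schematool -initSchema "
                         "-dbType %s -userName %s -passWord %s"
                         % (hive_server_conf_dir, hive_bin, db_type, db_user, db_password))
    check_schema_created_cmd = ("export HIVE_CONF_DIR=%s ; %s/schematool -info "
                                "-dbType %s -userName %s -passWord %s"
                                % (hive_server_conf_dir, hive_bin, db_type, db_user, db_password))

    # Execute(create_schema_cmd, not_if=check_schema_created_cmd, user=hive_user) therefore
    # initializes the schema only when the -info probe reports it is missing.
    print(create_schema_cmd)
    print(check_schema_created_cmd)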


[02/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/hbase-site.xml
new file mode 100755
index 0000000..047d3f6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,388 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.bulkload.staging.dir</name>
+    <value>/apps/hbase/staging</value>
+    <description>A staging directory in default file system (HDFS)
+    for bulk loading.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.hstore.flush.retries.number</name>
+    <value>120</value>
+    <description>
+    The number of times the region flush operation will be retried.
+    </description>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>604800000</value>
+    <description>Time between major compactions, expressed in milliseconds. Set to 0 to disable
+      time-based automatic major compactions. User-requested and size-based major compactions will
+      still run. This value is multiplied by hbase.hregion.majorcompaction.jitter to cause
+      compaction to start at a somewhat-random time during a given window of time. The default value
+      is 7 days, expressed in milliseconds. If major compactions are causing disruption in your
+      environment, you can configure them to run at off-peak times for your deployment, or disable
+      time-based major compactions by setting this parameter to 0, and run major compactions in a
+      cron job or by another external mechanism.</description>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2592000000</maximum>
+      <unit>milliseconds</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction.jitter</name>
+    <value>0.50</value>
+    <description>A multiplier applied to hbase.hregion.majorcompaction to cause compaction to occur
+      a given amount of time either side of hbase.hregion.majorcompaction. The smaller the number,
+      the closer the compactions will happen to the hbase.hregion.majorcompaction
+      interval.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>4</value>
+    <description>
+    Block updates if memstore has hbase.hregion.memstore.block.multiplier
+    times hbase.hregion.memstore.flush.size bytes.  Useful for preventing
+    runaway memstores during spikes in update traffic.  Without an
+    upper-bound, memstore fills such that when it flushes the
+    resultant flush files take a long time to compact or split, or
+    worse, we OOME.
+    </description>
+    <display-name>HBase Region Block Multiplier</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>2</value>
+        </entry>
+        <entry>
+          <value>4</value>
+        </entry>
+        <entry>
+          <value>8</value>
+        </entry>
+      </entries>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.bucketcache.ioengine</name>
+    <value/>
+    <description>Where to store the contents of the bucketcache. One of: onheap,
+      offheap, or file. If a file, set it to file:PATH_TO_FILE.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.bucketcache.size</name>
+    <value/>
+    <description>The size of the buckets for the bucketcache if you only use a single size.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.bucketcache.percentage.in.combinedcache</name>
+    <value/>
+    <description>Value to be set between 0.0 and 1.0</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.wal.codec</name>
+    <display-name>RegionServer WAL Codec</display-name>
+    <value>org.apache.hadoop.hbase.regionserver.wal.WALCellCodec</value>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>phoenix_sql_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.region.server.rpc.scheduler.factory.class</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>phoenix_sql_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.rpc.controllerfactory.class</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>phoenix_sql_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>phoenix.functions.allowUserDefinedFunctions</name>
+    <value> </value>
+    <depends-on>
+      <property>
+        <type>hbase-env</type>
+        <name>phoenix_sql_enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.coprocessor.regionserver.classes</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.hstore.compaction.max</name>
+    <value>10</value>
+    <description>The maximum number of StoreFiles which will be selected for a single minor
+      compaction, regardless of the number of eligible StoreFiles. Effectively, the value of
+      hbase.hstore.compaction.max controls the length of time it takes a single compaction to
+      complete. Setting it larger means that more StoreFiles are included in a compaction. For most
+      cases, the default value is appropriate.
+    </description>
+    <display-name>Maximum Files for Compaction</display-name>
+    <value-attributes>
+      <type>int</type>
+      <entries>
+        <entry>
+          <value>8</value>
+        </entry>
+        <entry>
+          <value>9</value>
+        </entry>
+        <entry>
+          <value>10</value>
+        </entry>
+        <entry>
+          <value>11</value>
+        </entry>
+        <entry>
+          <value>12</value>
+        </entry>
+        <entry>
+          <value>13</value>
+        </entry>
+        <entry>
+          <value>14</value>
+        </entry>
+        <entry>
+          <value>15</value>
+        </entry>
+      </entries>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.size</name>
+    <value>0.4</value>
+    <description>Percentage of RegionServer memory to allocate to write buffers.
+      Each column family within each region is allocated a smaller pool (the memstore) within this shared write pool.
+      If this buffer is full, updates are blocked and data is flushed from memstores until a global low watermark
+      (hbase.regionserver.global.memstore.size.lower.limit) is reached.
+    </description>
+    <display-name>% of RegionServer Allocated to Write Buffers</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0</minimum>
+      <maximum>0.8</maximum>
+      <increment-step>0.01</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value/>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+
+  <property>
+    <name>hbase.table.sanity.checks</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value>org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint</value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+      default on all tables. For any override coprocessor method, these classes
+      will be called in order. After implementing your own Coprocessor, just put
+      it in HBase's classpath and add the fully qualified class name here.
+      A coprocessor can also be loaded on demand by setting HTableDescriptor.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authentication</name>
+      </property>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description> Set Authorization Method.</description>
+    <display-name>Enable Authorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Native</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-hbase-plugin-properties</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>16000</value>
+    <display-name>HBase Master Port</display-name>
+    <description>The port the HBase Master should bind to.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>16010</value>
+    <description>The port for the HBase Master web UI.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.port</name>
+    <value>16020</value>
+    <description>The port the HBase RegionServer binds to.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>16030</value>
+    <description>The port for the HBase RegionServer web UI.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.4</value>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.38</value>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase.master.ui.readonly</name>
+    <value>false</value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase.rest.authentication.type</name>
+    <value>simple</value>
+  </property>
+  <property>
+    <name>hbase.rest.port</name>
+    <value>8091</value>
+  </property>
+  
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-audit.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-audit.xml
new file mode 100755
index 0000000..53d222a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-audit.xml
@@ -0,0 +1,121 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.audit.is.enabled</name>
+    <value>true</value>
+    <description>Is Audit enabled?</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs</name>
+    <value>true</value>
+    <display-name>Audit to HDFS</display-name>
+    <description>Is Audit to HDFS enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.dir</name>
+    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
+    <description>HDFS folder to write audit to, make sure the service user has requried permissions</description>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.hdfs.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
+    <value>/var/log/hbase/audit/hdfs/spool</value>
+    <description>/var/log/hbase/audit/hdfs/spool</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr</name>
+    <value>false</value>
+    <display-name>Audit to SOLR</display-name>
+    <description>Is Solr audit enabled?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>xasecure.audit.destination.solr</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.urls</name>
+    <value/>
+    <description>Solr URL</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.urls</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.zookeepers</name>
+    <value>NONE</value>
+    <description>Solr Zookeeper string</description>
+    <depends-on>
+      <property>
+        <type>ranger-admin-site</type>
+        <name>ranger.audit.solr.zookeepers</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
+    <value>/var/log/hbase/audit/solr/spool</value>
+    <description>/var/log/hbase/audit/solr/spool</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.audit.provider.summary.enabled</name>
+    <value>true</value>
+    <display-name>Audit provider summary enabled</display-name>
+    <description>Enable Summary audit?</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-plugin-properties.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
new file mode 100755
index 0000000..e8664ae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-plugin-properties.xml
@@ -0,0 +1,83 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>common.name.for.certificate</name>
+    <value/>
+    <description>Common name for certificate, this value should match what is specified in repo within ranger admin</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>policy_user</name>
+    <value>ambari-qa</value>
+    <display-name>Policy user for HBASE</display-name>
+    <description>This user must be system user and also present at Ranger admin portal</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger-hbase-plugin-enabled</name>
+    <value>No</value>
+    <display-name>Enable Ranger for HBASE</display-name>
+    <description>Enable ranger hbase plugin ?</description>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ranger-env</type>
+        <name>ranger-hbase-plugin-enabled</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_USERNAME</name>
+    <value>hbase</value>
+    <display-name>Ranger repository config user</display-name>
+    <description>Used for repository creation on ranger admin</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>REPOSITORY_CONFIG_PASSWORD</name>
+    <value>hbase</value>
+    <display-name>Ranger repository config password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>Used for repository creation on ranger admin</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>SSL_KEYSTORE_PASSWORD</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description/>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-policymgr-ssl.xml
new file mode 100755
index 0000000..932591f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-policymgr-ssl.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore</name>
+    <value>/usr/iop/current/hbase-client/conf/ranger-plugin-keystore.jks</value>
+    <description>Java Keystore files</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.password</name>
+    <value>myKeyFilePassword</value>
+    <property-type>PASSWORD</property-type>
+    <description>password for keystore</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore</name>
+    <value>/usr/iop/current/hbase-client/conf/ranger-plugin-truststore.jks</value>
+    <description>java truststore file</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.password</name>
+    <value>changeit</value>
+    <property-type>PASSWORD</property-type>
+    <description>java truststore password</description>
+    <value-attributes>
+      <type>password</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java keystore credential file</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
+    <value>jceks://file{{credential_file}}</value>
+    <description>java truststore credential file</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-security.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-security.xml
new file mode 100755
index 0000000..7ef63d8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/ranger-hbase-security.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ranger.plugin.hbase.service.name</name>
+    <value>{{repo_name}}</value>
+    <description>Name of the Ranger service containing HBase policies</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.source.impl</name>
+    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+    <description>Class to retrieve policies from the source</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.rest.url</name>
+    <value>{{policymgr_mgr_url}}</value>
+    <description>URL to Ranger Admin</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.rest.ssl.config.file</name>
+    <value>/etc/hbase/conf/ranger-policymgr-ssl.xml</value>
+    <description>Path to the file containing SSL details to contact Ranger Admin</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.pollIntervalMs</name>
+    <value>30000</value>
+    <description>Interval, in milliseconds, at which to poll the source for policy changes</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ranger.plugin.hbase.policy.cache.dir</name>
+    <value>/etc/ranger/{{repo_name}}/policycache</value>
+    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>xasecure.hbase.update.xapolicies.on.grant.revoke</name>
+    <value>true</value>
+    <display-name>Should HBase GRANT/REVOKE update XA policies</display-name>
+    <description>Whether the HBase plugin should update Ranger policies when permissions are changed via GRANT/REVOKE</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/kerberos.json
new file mode 100755
index 0000000..c71e8fa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/kerberos.json
@@ -0,0 +1,212 @@
+{
+  "services": [
+    {
+      "name": "HBASE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "hbase",
+          "principal": {
+            "value": "${hbase-env/hbase_user}-${cluster_name|toLower()}@${realm}",
+            "type" : "user",
+            "configuration": "hbase-env/hbase_principal_name",
+            "local_username": "${hbase-env/hbase_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/hbase.headless.keytab",
+            "owner": {
+              "name": "${hbase-env/hbase_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": ""
+            },
+            "configuration": "hbase-env/hbase_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hbase-site": {
+            "hbase.security.authentication": "kerberos",
+            "hbase.rest.authentication.type": "kerberos",
+            "hbase.security.authorization": "true",
+            "zookeeper.znode.parent": "/hbase-secure",
+            "hbase.coprocessor.master.classes": "{{hbase_coprocessor_master_classes}}",
+            "hbase.coprocessor.region.classes": "{{hbase_coprocessor_region_classes}}",
+            "hbase.coprocessor.regionserver.classes": "{{hbase_coprocessor_regionserver_classes}}",
+            "hbase.bulkload.staging.dir": "/apps/hbase/staging",
+            "hbase.master.ui.readonly": "true"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.hbase.groups": "${hadoop-env/proxyuser_group}",
+            "hadoop.proxyuser.hbase.hosts": "*"
+          }
+        },
+        {
+          "ranger-hbase-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HBASE_MASTER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "hbase_master_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.master.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.master.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hbase-site/hbase.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hbase-site/hbase.security.authentication.spnego.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/HBASE/HBASE_MASTER/hbase_master_hbase",
+              "principal": {
+                "configuration": "ranger-hbase-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-hbase-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HBASE_REGIONSERVER",
+          "identities": [
+            {
+              "name": "hbase_regionserver_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.regionserver.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.regionserver.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hbase-site/hbase.security.authentication.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hbase-site/hbase.security.authentication.spnego.kerberos.keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HBASE_REST_SERVER",
+          "identities": [
+            {
+              "name": "hbase_rest_server_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.rest.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.rest.keytab.file"
+              }
+            },
+            {
+              "name": "hbase_rest_server_spnego",
+              "principal": {
+                "value": "HTTP/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.rest.authentication.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.rest.authentication.kerberos.keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "PHOENIX_QUERY_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hbase-site/phoenix.queryserver.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hbase-site/phoenix.queryserver.keytab.file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
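The principal and keytab values in the descriptor above use Ambari's ${config-type/property} variable syntax, with optional functions such as |toLower(). A minimal sketch of how such a value might resolve, assuming plain token substitution and made-up values for the hbase user, cluster name and realm:

import re

def resolve(template, values):
    # Substitute ${name} and ${name|toLower()} tokens with the supplied values.
    def repl(match):
        name, _, func = match.group(1).partition('|')
        value = values.get(name, '')
        return value.lower() if func.startswith('toLower') else value
    return re.sub(r'\$\{([^}]+)\}', repl, template)

values = {'hbase-env/hbase_user': 'hbase',
          'cluster_name': 'BigCluster',
          'realm': 'EXAMPLE.COM'}
print(resolve('${hbase-env/hbase_user}-${cluster_name|toLower()}@${realm}', values))
# prints: hbase-bigcluster@EXAMPLE.COM

The resolved principal is what lands in hbase-env/hbase_principal_name, while the headless keytab path goes to hbase-env/hbase_user_keytab, as declared in the descriptor.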

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/metainfo.xml
new file mode 100755
index 0000000..99ba689
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/metainfo.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <extends>common-services/HBASE/0.96.0.2.0</extends>
+      <version>1.2.4</version>
+      
+      <components>   
+        <component>
+          <name>PHOENIX_QUERY_SERVER</name>
+          <displayName>Phoenix Query Server</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/phoenix_queryserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>hbase_phoenix_server</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+
+        <component>
+          <name>HBASE_REST_SERVER</name>
+          <displayName>HBaseRestServer</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/hbase_restgatewayserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+              
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hbase_4_2_5_*</name>
+            </package>
+            <package>
+              <name>phoenix_4_2_5_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+    </service>
+  </services>
+</metainfo>


[19/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_server.py
new file mode 100755
index 0000000..5cefc73
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_server.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import Direction
+from resource_management.libraries.functions import stack_select
+from solr_service import solr_service
+from solr import solr
+import os
+
+class SolrServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    if not os.path.isfile("/usr/iop/4.1.0.0/solr/conf/solr.in.sh"):
+      solr(type='4103', upgrade_type=upgrade_type)
+    solr(type='server', upgrade_type=upgrade_type)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+    if params.version and compare_versions(format_stack_version(params.version), '4.1.0.0') >= 0:
+      stack_select.select("solr-server", params.version)
+
+      call_conf_select = True
+      conf_dir = '/usr/iop/4.1.0.0/solr/conf'
+      if params.upgrade_direction is not None and params.upgrade_direction == Direction.DOWNGRADE and not os.path.islink(conf_dir):
+        call_conf_select = False
+
+      if call_conf_select:
+        conf_select.select(params.stack_name, "solr", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    solr_service(action = 'start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    solr_service(action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.solr_pid_file)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"solr.hdfs.security.kerberos.enabled":"true"}
+      props_empty_check = ["solr.hdfs.security.kerberos.keytabfile",
+                           "solr.hdfs.security.kerberos.principal"]
+      props_read_check = ["solr.hdfs.security.kerberos.keytabfile"]
+      solr_site_props = build_expectations('solr-site', props_value_check, props_empty_check, props_read_check)
+
+      solr_expectations = {}
+      solr_expectations.update(solr_site_props)
+
+      security_params = get_params_from_filesystem(status_params.solr_conf_dir,
+                                                   {'solr-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params,solr_expectations)
+
+      if not result_issues: # If all validations passed successfully
+        try:
+          if 'solr-site' not in security_params \
+            or 'solr.hdfs.security.kerberos.keytabfile' not in security_params['solr-site'] \
+            or 'solr.hdfs.security.kerberos.principal' not in security_params['solr-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.solr_user,
+                                security_params['solr-site']['solr.hdfs.security.kerberos.keytabfile'],
+                                security_params['solr-site']['solr.hdfs.security.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+if __name__ == "__main__":
+  SolrServer().execute()
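The status() method above delegates to check_process_status() with the Solr pid file. A rough, stand-alone sketch of the same liveness check, assuming the usual pid-file convention (the path is only an example, and the real helper raises an exception for a dead process rather than returning a boolean):

import os

def is_process_running(pid_file):
    # Read the pid from the file and probe the process with signal 0 (a no-op signal).
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
    except (IOError, ValueError):
        return False
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    return True

print(is_process_running('/var/run/solr/solr-8983.pid'))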

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_service.py
new file mode 100755
index 0000000..105aac6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_service.py
@@ -0,0 +1,59 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def solr_service(action='start'):
+  import params
+  cmd = format("{solr_home}/bin/solr")
+
+  if action == 'start':
+
+    if params.security_enabled:
+      if params.solr_principal is None:
+        solr_principal_with_host = 'missing_principal'
+      else:
+        solr_principal_with_host = params.solr_principal.replace("_HOST", params.hostname)
+      kinit_cmd = format("{kinit_path_local} -kt {solr_keytab} {solr_principal_with_host};")
+      Execute(kinit_cmd,user=params.solr_user)
+
+    Execute (params.solr_home+'/server/scripts/cloud-scripts/zkcli.sh -zkhost ' + params.zookeeper_hosts_list + ' -cmd makepath ' + params.zookeeper_chroot, user=params.solr_user, ignore_failures=True )
+
+    if (params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE) or (compare_versions(format_stack_version(params.current_version), '4.2.0.0') >= 0):
+      solr_home_dir = params.solr_data_dir
+    else:
+      solr_home_dir = params.lib_dir + "/data"
+
+    daemon_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh {cmd} start -c -s {solr_home_dir} -V")
+    no_op_test = format("ls {solr_pid_file} >/dev/null 2>&1 && ps `cat {solr_pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            not_if=no_op_test,
+            user=params.solr_user
+    )
+  elif action == 'stop':
+    daemon_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh {cmd} stop")
+    no_op_test = format("! ((`SOLR_INCLUDE={solr_conf_dir}/solr.in.sh {cmd} status |grep process |wc -l`))")
+    rm_pid = format("rm -f {solr_pid_file}")
+    Execute(daemon_cmd,
+            not_if=no_op_test,
+            user=params.solr_user
+    )
+    Execute(rm_pid)
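The start/stop commands above are built with resource_management's format(), which interpolates values from params.py into ordinary shell strings. A hypothetical expansion with placeholder paths (the real values come from the cluster configuration):

solr_conf_dir = '/usr/iop/current/solr-server/conf'
solr_home_dir = '/var/lib/solr/data'
solr_pid_file = '/var/run/solr/solr-8983.pid'
cmd = '/usr/iop/current/solr/bin/solr'

daemon_cmd = 'SOLR_INCLUDE={0}/solr.in.sh {1} start -c -s {2} -V'.format(
    solr_conf_dir, cmd, solr_home_dir)
no_op_test = 'ls {0} >/dev/null 2>&1 && ps `cat {0}` >/dev/null 2>&1'.format(
    solr_pid_file)
print(daemon_cmd)
print(no_op_test)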

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_upgrade.py
new file mode 100755
index 0000000..d72c5a3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_upgrade.py
@@ -0,0 +1,135 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management import *
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import get_unique_id_and_date
+
+class SolrServerUpgrade(Script):
+  def pre_upgrade_conf41(self, env):
+    """
+    Creates the /etc/solr/4.1.0.0/0 directory, copies the Solr config files into it,
+    and creates the symlinks accordingly.
+
+    conf-select create-conf-dir --package solr --stack-version 4.1.0.0 --conf-version 0
+    cp -r /usr/iop/4.1.0.0/solr/conf/* /etc/solr/4.1.0.0/0/.
+    unlink or rm -r /usr/iop/4.1.0.0/solr/conf
+    ln -s /etc/solr/4.1.0.0/0 /usr/iop/4.1.0.0/solr/conf
+    conf-select set-conf-dir --package solr --stack-version 4.1.0.0 --conf-version 0
+    """
+    import params
+    env.set_params(params)
+
+    solr41_conf_dir="/usr/iop/4.1.0.0/solr/conf"
+    solr41_etc_dir="/etc/solr/4.1.0.0/0"
+    if not os.path.exists(solr41_etc_dir):
+      conf_select.create(params.stack_name, "solr", "4.1.0.0")
+
+    content_path=solr41_conf_dir
+    if not os.path.isfile("/usr/iop/4.1.0.0/solr/conf/solr.in.sh"):
+      content_path = "/etc/solr/conf.backup"
+
+    for each in os.listdir(content_path):
+      File(os.path.join(solr41_etc_dir, each),
+           owner=params.solr_user,
+           content = StaticFile(os.path.join(content_path, each)))
+
+    if not os.path.islink(solr41_conf_dir):
+      Directory(solr41_conf_dir,
+                action="delete",
+                create_parents=True)
+
+    if os.path.islink(solr41_conf_dir):
+      os.unlink(solr41_conf_dir)
+
+    if not os.path.islink(solr41_conf_dir):
+      Link(solr41_conf_dir,
+           to=solr41_etc_dir
+      )
+
+    conf_select.select(params.stack_name, "solr", "4.1.0.0")
+
+  def pre_stop_backup_cores(self, env):
+    """
+    Backs up the Solr cores under Solr's home directory.
+    cp -r /var/lib/solr/data/* /tmp/solr/cores
+    """
+    import params
+    env.set_params(params)
+
+    if compare_versions(format_stack_version(params.current_version), '4.2.0.0') >= 0:
+      solr_home_dir=params.solr_data_dir
+    else: #4.1.0.0
+      solr_home_dir=params.old_lib_dir + "/data"
+
+    unique = get_unique_id_and_date()
+    backup_solr_dir="/tmp/upgrades/{0}/solr_{1}".format(params.current_version, unique)
+    backup_solr_cores="/tmp/solr/cores"
+
+    if os.path.isdir(solr_home_dir) and not os.path.isdir(backup_solr_dir):
+      os.makedirs(backup_solr_dir)
+      Execute(('cp', '-r', solr_home_dir+"/.", backup_solr_dir),
+              sudo=True
+      )
+
+    if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:
+      Directory(backup_solr_cores,
+                action="delete",
+                create_parents=True)
+
+      Directory(backup_solr_cores,
+                mode=0755,
+                cd_access='a',
+                owner=params.solr_user,
+                create_parents=True,
+                group=params.user_group
+      )
+
+      Execute(('cp', '-r', solr_home_dir+"/.", backup_solr_cores),
+              user=params.solr_user
+      )
+
+  def pre_start_migrate_cores(self, env):
+    """
+    Copies the Solr cores from the previous version to the new Solr home directory if solr_home is a different directory.
+    cp -r /tmp/solr/cores/* /opt/solr/data/.
+    """
+    import params
+    env.set_params(params)
+
+    if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:
+      backup_solr_cores="/tmp/solr/cores"
+      solr_home_dir=params.solr_data_dir
+
+      Directory(format(solr_home_dir),
+                owner=params.solr_user,
+                create_parents=True,
+                group=params.user_group
+      )
+
+      if os.path.isdir(solr_home_dir) and os.path.isdir(backup_solr_cores):
+        Execute(('cp', '-rn', backup_solr_cores+"/.", solr_home_dir),
+                 user=params.solr_user,
+                 logoutput=True
+        )
+
+if __name__ == "__main__":
+  SolrServerUpgrade().execute()
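The end state that pre_upgrade_conf41() above works toward is a conf directory that is a symlink into /etc: the config content lives in /etc/solr/4.1.0.0/0 and /usr/iop/4.1.0.0/solr/conf points at it. A small sketch that checks for that layout, using the paths from the docstring:

import os

conf_dir = '/usr/iop/4.1.0.0/solr/conf'
etc_dir = '/etc/solr/4.1.0.0/0'

if os.path.islink(conf_dir) and os.path.realpath(conf_dir) == os.path.realpath(etc_dir):
    print('%s is re-pointed to %s' % (conf_dir, etc_dir))
else:
    print('conf dir has not been re-pointed yet')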

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/status_params.py
new file mode 100755
index 0000000..73c3b16
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/status_params.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+
+config = Script.get_config()
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+solr_conf_dir='/usr/iop/current/solr-server/conf'
+solr_user = config['configurations']['solr-env']['solr_user']
+hostname = config['hostname']
+kinit_path_local = functions.get_kinit_path()
+tmp_dir = Script.get_tmp_dir()
+solr_pid_dir = config['configurations']['solr-env']['solr_pid_dir']
+solr_port = config['configurations']['solr-env']['solr_port']
+solr_pid_file = format("{solr_pid_dir}/solr-{solr_port}.pid")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/templates/solr.xml.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/templates/solr.xml.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/templates/solr.xml.j2
new file mode 100755
index 0000000..71d821e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/templates/solr.xml.j2
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+   This is an example of a simple "solr.xml" file for configuring one or
+   more Solr Cores, as well as allowing Cores to be added, removed, and
+   reloaded via HTTP requests.
+
+   More information about options available in this configuration file,
+   and Solr Core administration can be found online:
+   http://wiki.apache.org/solr/CoreAdmin
+-->
+
+<solr>
+
+  <solrcloud>
+
+    <str name="host">${host:}</str>
+    <int name="hostPort">${jetty.port:8983}</int>
+    <str name="hostContext">${hostContext:solr}</str>
+
+    <bool name="genericCoreNodeNames">${genericCoreNodeNames:true}</bool>
+
+    <int name="zkClientTimeout">${zkClientTimeout:30000}</int>
+    <int name="distribUpdateSoTimeout">${distribUpdateSoTimeout:600000}</int>
+    <int name="distribUpdateConnTimeout">${distribUpdateConnTimeout:60000}</int>
+
+  </solrcloud>
+
+  <shardHandlerFactory name="shardHandlerFactory"
+    class="HttpShardHandlerFactory">
+    <int name="socketTimeout">${socketTimeout:600000}</int>
+    <int name="connTimeout">${connTimeout:60000}</int>
+  </shardHandlerFactory>
+
+</solr>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/templates/solr_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/templates/solr_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/templates/solr_jaas.conf.j2
new file mode 100755
index 0000000..91794d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/templates/solr_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+ com.sun.security.auth.module.Krb5LoginModule required
+ useKeyTab=true
+ storeKey=true
+ useTicketCache=false
+ keyTab="{{solr_web_kerberos_keytab}}"
+ principal="{{solr_web_kerberos_principal}}";
+};
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/alerts.json
new file mode 100755
index 0000000..0e38f16
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/alerts.json
@@ -0,0 +1,32 @@
+{
+  "SPARK": {
+    "service": [],
+    "SPARK_JOBHISTORYSERVER": [
+      {
+        "name": "SPARK_JOBHISTORYSERVER_PROCESS",
+        "label": "Spark History Server",
+        "description": "This host-level alert is triggered if the Spark History Server cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{spark-defaults/spark.history.ui.port}}",
+          "default_port": 18080,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5
+            }
+          }
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-defaults.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-defaults.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-defaults.xml
new file mode 100755
index 0000000..30826f1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-defaults.xml
@@ -0,0 +1,159 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>spark.eventLog.enabled</name>
+    <value>true</value>
+    <description>Whether to log Spark events, useful for reconstructing the Web UI after the application has finished.</description>
+  </property>
+
+  <property>
+    <name>spark.eventLog.dir</name>
+    <value>/iop/apps/4.1.0.0/spark/logs/history-server</value>
+    <description>Base directory in which Spark events are logged, if spark.eventLog.enabled is true. Within this base directory, Spark creates a sub-directory for each application, and logs the events specific to the application in this directory. Users may want to set this to a unified location like an HDFS directory so history files can be read by the history server.</description>
+  </property>
+
+  <property>
+    <name>spark.yarn.executor.memoryOverhead</name>
+    <value>384</value>
+    <description>
+      The amount of off heap memory (in megabytes) to be allocated per executor.
+      This is memory that accounts for things like VM overheads, interned strings,
+      other native overheads, etc.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.driver.memoryOverhead</name>
+    <value>384</value>
+    <description>
+      The amount of off heap memory (in megabytes) to be allocated per driver.
+      This is memory that accounts for things like VM overheads, interned strings,
+      other native overheads, etc.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.applicationMaster.waitTries</name>
+    <value>10</value>
+    <description>
+      Set the number of times the ApplicationMaster waits for the Spark master, and also
+      the number of tries it waits for the SparkContext to be initialized.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.scheduler.heartbeat.interval-ms</name>
+    <value>5000</value>
+    <description>
+      The interval, in milliseconds, at which the Spark ApplicationMaster heartbeats into the YARN ResourceManager.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.max.executor.failures</name>
+    <value>3</value>
+    <description>
+      The maximum number of executor failures before failing the application.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.queue</name>
+    <value>default</value>
+    <description>
+      The name of the YARN queue to which the application is submitted.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.containerLauncherMaxThreads</name>
+    <value>25</value>
+    <description>
+      The maximum number of threads to use in the application master for launching executor containers.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.submit.file.replication</name>
+    <value>3</value>
+    <description>
+      HDFS replication level for the files uploaded into HDFS for the application.
+      These include things like the Spark jar, the app jar, and any distributed cache files/archives.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.yarn.preserve.staging.files</name>
+    <value>false</value>
+    <description>
+      Set to true to preserve the staged files (Spark jar, app jar, distributed cache files) at the
+      end of the job rather than deleting them.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.history.ui.port</name>
+    <value>18080</value>
+    <description>
+      The port to which the web interface of the History Server binds.
+    </description>
+  </property>
+  <property>
+    <name>spark.driver.extraJavaOptions</name>
+    <value>-Diop.version={{iop_full_version}}</value>
+    <description>
+      Specifies parameters that are passed to the JVM of the Spark driver.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>spark.yarn.am.extraJavaOptions</name>
+    <value>-Diop.version={{iop_full_version}}</value>
+    <description>
+      Specifies the parameters that are passed to the JVM of the Spark Application Master.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>spark.history.kerberos.principal</name>
+    <value>none</value>
+    <description>
+      Kerberos principal name for the Spark History Server.
+    </description>
+  </property>
+
+  <property>
+    <name>spark.history.kerberos.keytab</name>
+    <value>none</value>
+    <description>
+      Location of the kerberos keytab file for the Spark History Server.
+    </description>
+  </property>
+</configuration>
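The properties above are rendered by Ambari into spark-defaults.conf as plain key/value pairs. A hypothetical sketch of what a handful of the resulting lines could look like, with a made-up value standing in for {{iop_full_version}}:

props = {
    'spark.eventLog.enabled': 'true',
    'spark.eventLog.dir': '/iop/apps/4.1.0.0/spark/logs/history-server',
    'spark.history.ui.port': '18080',
    'spark.driver.extraJavaOptions': '-Diop.version=4.1.0.0',
}
for key, value in sorted(props.items()):
    print('%s %s' % (key, value))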

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-env.xml
new file mode 100755
index 0000000..e0c8a01
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-env.xml
@@ -0,0 +1,110 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>spark_thriftserver_port</name>
+    <value>10002</value>
+    <description>
+      TCP port number to listen on, default 10002.
+    </description>
+  </property>
+
+  <property>
+    <name>spark_user</name>
+    <value>spark</value>
+    <property-type>USER</property-type>
+    <description>Spark User.</description>
+  </property>
+
+  <property>
+    <name>spark_group</name>
+    <value>spark</value>
+    <property-type>GROUP</property-type>
+    <description>spark group</description>
+  </property>
+
+  <property>
+    <name>spark_log_dir</name>
+    <value>/var/log/spark</value>
+    <description>Spark Log Dir</description>
+  </property>
+
+  <property>
+    <name>spark_pid_dir</name>
+    <value>/var/run/spark</value>
+  </property>
+
+  <!-- spark-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the Jinja template for the spark-env.sh file</description>
+    <value>
+#!/usr/bin/env bash
+
+# This file is sourced when running various Spark programs.
+# Copy it as spark-env.sh and edit that to configure Spark for your site.
+
+# Options read in YARN client mode
+#SPARK_EXECUTOR_INSTANCES="2" #Number of workers to start (Default: 2)
+#SPARK_EXECUTOR_CORES="1" #Number of cores for the workers (Default: 1).
+#SPARK_EXECUTOR_MEMORY="1G" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)
+#SPARK_DRIVER_MEMORY="512 Mb" #Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)
+#SPARK_YARN_APP_NAME="spark" #The name of your application (Default: Spark)
+#SPARK_YARN_QUEUE="default" #The hadoop queue to use for allocation requests (Default: 'default')
+#SPARK_YARN_DIST_FILES="" #Comma separated list of files to be distributed with the job.
+#SPARK_YARN_DIST_ARCHIVES="" #Comma separated list of archives to be distributed with the job.
+
+# Generic options for the daemons used in the standalone deploy mode
+
+# Alternate conf dir. (Default: ${SPARK_HOME}/conf)
+export SPARK_CONF_DIR=${SPARK_HOME:-{{spark_home}}}/conf
+
+# Where log files are stored. (Default: ${SPARK_HOME}/logs)
+#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs
+export SPARK_LOG_DIR={{spark_log_dir}}
+
+# Where the pid file is stored. (Default: /tmp)
+export SPARK_PID_DIR={{spark_pid_dir}}
+
+# A string representing this instance of Spark. (Default: $USER)
+SPARK_IDENT_STRING=$USER
+
+# The scheduling priority for daemons. (Default: 0)
+SPARK_NICENESS=0
+
+export SPARK_PUBLIC_DNS={{spark_history_server_host}}
+export SPARK_HISTORY_OPTS="-Dspark.history.ui.port={{spark_history_ui_port}} -Dspark.history.fs.logDirectory={{spark_eventlog_dir_default}}"
+export HIVE_SERVER2_THRIFT_BIND_HOST={{spark_thrift_server_host}}
+export HIVE_SERVER2_THRIFT_PORT={{spark_thriftserver_port}}
+
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+# The java implementation to use.
+export JAVA_HOME={{java_home}}
+
+</value>
+  </property>
+
+</configuration>
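The content property above is a Jinja template; Ambari fills in the {{...}} placeholders when it generates spark-env.sh. A rough sketch of that substitution with example values, using plain string replacement in place of the real Jinja rendering:

fragment = (
    'export SPARK_LOG_DIR={{spark_log_dir}}\n'
    'export SPARK_PID_DIR={{spark_pid_dir}}\n'
    'export JAVA_HOME={{java_home}}\n'
)
values = {
    'spark_log_dir': '/var/log/spark',
    'spark_pid_dir': '/var/run/spark',
    'java_home': '/usr/jdk64/jdk1.8.0',
}
rendered = fragment
for key, value in values.items():
    rendered = rendered.replace('{{%s}}' % key, value)
print(rendered)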

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-javaopts-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-javaopts-properties.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-javaopts-properties.xml
new file mode 100755
index 0000000..c2db325
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-javaopts-properties.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>content</name>
+    <description>Spark-javaopts-properties</description>
+    <value> </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-log4j.xml
new file mode 100755
index 0000000..2ba64fb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-log4j.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>content</name>
+    <description>Spark-log4j-Properties</description>
+    <value>
+# Set everything to be logged to the console
+log4j.rootCategory=INFO, console
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
+
+# Settings to quiet third party logs that are too verbose
+log4j.logger.org.eclipse.jetty=WARN
+log4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR
+log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
+log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
+
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-metrics-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-metrics-properties.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-metrics-properties.xml
new file mode 100755
index 0000000..9af5f2e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/configuration/spark-metrics-properties.xml
@@ -0,0 +1,160 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>content</name>
+    <description>Spark-metrics-properties</description>
+    <value>
+# syntax: [instance].sink|source.[name].[options]=[value]
+
+# This file configures Spark's internal metrics system. The metrics system is
+# divided into instances which correspond to internal components.
+# Each instance can be configured to report its metrics to one or more sinks.
+# Accepted values for [instance] are "master", "worker", "executor", "driver",
+# and "applications". A wild card "*" can be used as an instance name, in
+# which case all instances will inherit the supplied property.
+#
+# Within an instance, a "source" specifies a particular set of grouped metrics.
+# There are two kinds of sources:
+# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will
+# collect a Spark component's internal state. Each instance is paired with a
+# Spark source that is added automatically.
+# 2. Common sources, like JvmSource, which will collect low level state.
+# These can be added through configuration options and are then loaded
+# using reflection.
+#
+# A "sink" specifies where metrics are delivered to. Each instance can be
+# assigned one or more sinks.
+#
+# The sink|source field specifies whether the property relates to a sink or
+# source.
+#
+# The [name] field specifies the name of source or sink.
+#
+# The [options] field is the specific property of this source or sink. The
+# source or sink is responsible for parsing this property.
+#
+# Notes:
+# 1. To add a new sink, set the "class" option to a fully qualified class
+# name (see examples below).
+# 2. Some sinks involve a polling period. The minimum allowed polling period
+# is 1 second.
+# 3. Wild card properties can be overridden by more specific properties.
+# For example, master.sink.console.period takes precedence over
+# *.sink.console.period.
+# 4. A metrics specific configuration
+# "spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties" should be
+# added to Java properties using -Dspark.metrics.conf=xxx if you want to
+# customize metrics system. You can also put the file in ${SPARK_HOME}/conf
+# and it will be loaded automatically.
+# 5. MetricsServlet is added by default as a sink in master, worker and client
+# driver, you can send http request "/metrics/json" to get a snapshot of all the
+# registered metrics in json format. For master, requests "/metrics/master/json" and
+# "/metrics/applications/json" can be sent seperately to get metrics snapshot of
+# instance master and applications. MetricsServlet may not be configured by self.
+#
+
+## List of available sinks and their properties.
+
+# org.apache.spark.metrics.sink.ConsoleSink
+# Name: Default: Description:
+# period 10 Poll period
+# unit seconds Units of poll period
+
+# org.apache.spark.metrics.sink.CSVSink
+# Name: Default: Description:
+# period 10 Poll period
+# unit seconds Units of poll period
+# directory /tmp Where to store CSV files
+
+# org.apache.spark.metrics.sink.GangliaSink
+# Name: Default: Description:
+# host NONE Hostname or multicast group of Ganglia server
+# port NONE Port of Ganglia server(s)
+# period 10 Poll period
+# unit seconds Units of poll period
+# ttl 1 TTL of messages sent by Ganglia
+# mode multicast Ganglia network mode ('unicast' or 'multicast')
+
+# org.apache.spark.metrics.sink.JmxSink
+
+# org.apache.spark.metrics.sink.MetricsServlet
+# Name: Default: Description:
+# path VARIES* Path prefix from the web server root
+# sample false Whether to show entire set of samples for histograms ('false' or 'true')
+#
+# * Default path is /metrics/json for all instances except the master. The master has two paths:
+# /metrics/applications/json # App information
+# /metrics/master/json # Master information
+
+# org.apache.spark.metrics.sink.GraphiteSink
+# Name: Default: Description:
+# host NONE Hostname of Graphite server
+# port NONE Port of Graphite server
+# period 10 Poll period
+# unit seconds Units of poll period
+# prefix EMPTY STRING Prefix to prepend to metric name
+
+## Examples
+# Enable JmxSink for all instances by class name
+#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink
+
+# Enable ConsoleSink for all instances by class name
+#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink
+
+# Polling period for ConsoleSink
+#*.sink.console.period=10
+
+#*.sink.console.unit=seconds
+
+# Master instance overlap polling period
+#master.sink.console.period=15
+
+#master.sink.console.unit=seconds
+
+# Enable CsvSink for all instances
+#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink
+
+# Polling period for CsvSink
+#*.sink.csv.period=1
+
+#*.sink.csv.unit=minutes
+
+# Polling directory for CsvSink
+#*.sink.csv.directory=/tmp/
+
+# Worker instance overlap polling period
+#worker.sink.csv.period=10
+
+#worker.sink.csv.unit=minutes
+
+# Enable jvm source for instance master, worker, driver and executor
+#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource
+
+#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSource
+
+#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource
+
+#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource
+
+    </value>
+  </property>
+</configuration>
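
As a rough illustration of the MetricsServlet endpoints described in the comments above, the sketch below (Python 2, matching the scripts in this stack) polls a /metrics/json endpoint and lists the registered gauges. The host, port and path are placeholders for illustration, not values taken from this configuration.

    # Minimal sketch: fetch a metrics snapshot from a Spark MetricsServlet endpoint.
    import json
    import urllib2  # Python 2, like the rest of the scripts in this stack

    def fetch_metric_names(host="localhost", port=18080, path="/metrics/json"):
        url = "http://%s:%d%s" % (host, port, path)
        snapshot = json.loads(urllib2.urlopen(url, timeout=10).read())
        # The servlet reports gauges, counters, histograms, meters and timers.
        return sorted(snapshot.get("gauges", {}).keys())

    if __name__ == "__main__":
        for name in fetch_metric_names():
            print name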

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/kerberos.json
new file mode 100755
index 0000000..6189324
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/kerberos.json
@@ -0,0 +1,55 @@
+{
+  "services": [
+    {
+      "name": "SPARK",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "/hdfs"
+        },
+        {
+          "name": "sparkuser",
+          "principal": {
+            "value": "${spark-env/spark_user}-${cluster_name}@${realm}",
+            "type" : "user",
+            "configuration": "spark-defaults/spark.history.kerberos.principal",
+            "local_username" : "${spark-env/spark_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/spark.headless.keytab",
+            "owner": {
+              "name": "${spark-env/spark_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+               "access": ""
+            },
+            "configuration": "spark-defaults/spark.history.kerberos.keytab"
+           }
+        }
+      ],
+      "components": [
+        {
+          "name": "SPARK_JOBHISTORYSERVER"
+        },
+        {
+          "name": "SPARK_CLIENT"
+        },
+        {
+          "name": "SPARK_THRIFTSERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
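
The principal and keytab values above use Ambari's ${config-type/property} placeholder syntax. The sketch below only illustrates how such a template could be expanded against a flat dictionary of values; it is not Ambari's actual variable-replacement code, and the sample values are assumptions.

    import re

    def resolve_placeholders(template, values):
        # Replace each ${config-type/property} token with its value; unknown tokens are left as-is.
        return re.sub(r"\$\{([^}]+)\}", lambda m: values.get(m.group(1), m.group(0)), template)

    values = {
        "spark-env/spark_user": "spark",
        "cluster_name": "mycluster",
        "realm": "EXAMPLE.COM",
    }
    # -> "spark-mycluster@EXAMPLE.COM"
    print resolve_placeholders("${spark-env/spark_user}-${cluster_name}@${realm}", values)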

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/metainfo.xml
new file mode 100755
index 0000000..02abc62
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/metainfo.xml
@@ -0,0 +1,187 @@
+<?xml version="1.0"?>
+<!--Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SPARK</name>
+      <displayName>Spark</displayName>
+      <comment>Apache Spark is a fast and general engine for large-scale data processing</comment>
+      <version>1.4.1</version>
+      <components>
+        <component>
+          <name>SPARK_JOBHISTORYSERVER</name>
+          <displayName>Spark History Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+               <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+               <scope>host</scope>
+               <auto-deploy>
+                 <enabled>true</enabled>
+               </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/job_history_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+        <component>
+          <name>SPARK_THRIFTSERVER</name>
+          <displayName>Spark Thrift Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HIVE/HIVE_METASTORE</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>HIVE/HIVE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/thrift_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+        <component>
+          <name>SPARK_CLIENT</name>
+          <displayName>Spark Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+               <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+               <scope>host</scope>
+               <auto-deploy>
+                 <enabled>true</enabled>
+               </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/spark_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>spark-env.sh</fileName>
+              <dictionaryName>spark-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>properties</type>
+              <fileName>spark-defaults.conf</fileName>
+              <dictionaryName>spark-defaults</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>spark-log4j.properties</fileName>
+              <dictionaryName>spark-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>spark-*_IBM*</name>
+            </package>
+            <package>
+              <name>spark-python*_IBM*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>spark-*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <configuration-dependencies>
+        <config-type>spark-env</config-type>
+        <config-type>spark-defaults</config-type>
+        <config-type>spark-log4j</config-type>
+      </configuration-dependencies>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+        <service>YARN</service>
+        <service>HIVE</service>
+      </requiredServices>
+
+    </service>
+  </services>
+</metainfo>
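
For readers unfamiliar with the metainfo.xml layout above, the following standalone sketch (not part of Ambari) lists each component's name, category and cardinality using only the Python standard library; the file path in the usage comment is an assumption.

    import xml.etree.ElementTree as ET

    def list_components(metainfo_path):
        # Yields (name, category, cardinality) for every <component> in the file.
        tree = ET.parse(metainfo_path)
        for component in tree.getroot().iter("component"):
            yield (component.findtext("name"),
                   component.findtext("category"),
                   component.findtext("cardinality"))

    # Example usage:
    # for name, category, cardinality in list_components("metainfo.xml"):
    #     print name, category, cardinality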

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/job_history_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/job_history_server.py
new file mode 100755
index 0000000..439defe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/job_history_server.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import os
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from spark import *
+
+
+class JobHistoryServer(Script):
+
+  def get_component_name(self):
+     return "spark-historyserver"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "spark", params.version)
+      stack_select.select("spark-historyserver", params.version)
+      #Execute(format("stack-select set spark-historyserver {version}"))
+
+  def install(self, env):
+    self.install_packages(env)
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    daemon_cmd = format('{spark_history_server_stop}')
+    Execute(daemon_cmd,
+            user=params.spark_user,
+            environment={'JAVA_HOME': params.java_home}
+    )
+    if os.path.isfile(params.spark_history_server_pid_file):
+      os.remove(params.spark_history_server_pid_file)
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    self.create_historyServer_directory()
+    self.copy_spark_yarn_jar()
+
+    if params.security_enabled:
+      spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
+      Execute(spark_kinit_cmd, user=params.spark_user)
+
+    # FIXME! TODO! remove this after soft link bug is fixed:
+    #if not os.path.islink('/usr/iop/current/spark'):
+    #  iop_version = get_iop_version()
+    #  cmd = 'ln -s /usr/iop/' + iop_version + '/spark /usr/iop/current/spark'
+    #  Execute(cmd)
+
+    daemon_cmd = format('{spark_history_server_start}')
+    no_op_test = format(
+      'ls {spark_history_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_history_server_pid_file}` >/dev/null 2>&1')
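+    # not_if: skip the start command when the pid file exists and the recorded process is still alive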
+    Execute(daemon_cmd,
+            user=params.spark_user,
+            environment={'JAVA_HOME': params.java_home},
+            not_if=no_op_test
+    )
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    pid_file = format("{spark_history_server_pid_file}")
+    # Verify that the process recorded in the history server pid file is running
+    check_process_status(pid_file)
+
+  def create_historyServer_directory(self):
+    import params
+
+    params.HdfsResource(params.spark_hdfs_user_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.spark_user,
+                         group=params.user_group,
+                         mode=params.spark_hdfs_user_mode)
+
+    params.HdfsResource(params.spark_eventlog_dir_default,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.spark_user,
+                         group=params.user_group,
+                         mode=params.spark_eventlog_dir_mode)
+
+    params.HdfsResource(None, action="execute")
+
+  def copy_spark_yarn_jar(self):
+    import params
+
+    jar_src_file = params.spark_jar_src_dir + "/" + params.spark_jar_src_file
+    jar_dst_file = params.spark_jar_hdfs_dir + "/" + params.spark_jar_src_file
+    jar_dst_path = params.spark_jar_hdfs_dir
+
+    # Remove to enable refreshing jars during restart
+    hdfs_remove_cmd = "dfs -rm -R -skipTrash %s" % jar_dst_path
+
+    try:
+      ExecuteHadoop(hdfs_remove_cmd,
+                    user=params.hdfs_user,
+                    logoutput=True,
+                    conf_dir=params.hadoop_conf_dir,
+                    bin_dir=params.hadoop_bin_dir)
+    except Fail:
+      pass
+
+    params.HdfsResource(jar_dst_path,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.spark_user,
+                        group=params.user_group,
+                        mode=params.spark_jar_hdfs_dir_mode)
+
+    params.HdfsResource(None, action="execute")
+
+    params.HdfsResource(InlineTemplate(jar_dst_file).get_content(),
+                        type="file",
+                        action="create_on_execute",
+                        source=jar_src_file,
+                        owner=params.spark_user,
+                        group=params.user_group,
+                        mode=params.spark_jar_file_mode)
+
+    params.HdfsResource(None, action="execute")
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    spark(env)
+
+if __name__ == "__main__":
+  JobHistoryServer().execute()
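
The status() method above delegates to check_process_status, which reads the pid file and verifies the recorded process is alive. A rough standalone equivalent is sketched below; the function name and error handling are illustrative, not the resource_management API.

    import errno
    import os

    def is_process_running(pid_file):
        # True if the pid file exists and the recorded pid belongs to a live process.
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
        except (IOError, ValueError):
            return False
        try:
            os.kill(pid, 0)  # signal 0 only probes for existence/permission
        except OSError as e:
            return e.errno == errno.EPERM
        return True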

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/params.py
new file mode 100755
index 0000000..53efae0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/params.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions.default import default
+from resource_management import *
+from resource_management.libraries.functions import conf_select, stack_select
+from spark import *
+import status_params
+
+
+# a map of the Ambari role to the component name
+# for use with /usr/iop/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
+  'SPARK_CLIENT' : 'spark-client',
+  'SPARK_THRIFTSERVER' : 'spark-thriftserver'
+}
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SPARK_CLIENT")
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = default("/hostLevelParams/stack_name", None)
+
+hadoop_home = "/usr/iop/current/hadoop-client"
+spark_conf = format("/usr/iop/current/{component_directory}/conf")
+spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
+spark_pid_dir = status_params.spark_pid_dir
+spark_role_root = "spark-client"
+
+command_role = default("/role", "")
+
+if command_role == "SPARK_CLIENT":
+  spark_role_root = "spark-client"
+elif command_role == "SPARK_JOBHISTORYSERVER":
+  spark_role_root = "spark-historyserver"
+elif command_role == "SPARK_THRIFTSERVER":
+  spark_role_root = "spark-thriftserver"
+
+spark_home = format("/usr/iop/current/{spark_role_root}")
+if not os.path.exists(spark_home):
+  os.symlink('/usr/iop/current/spark', spark_home)
+
+java_home = config['hostLevelParams']['java_home']
+
+spark_user = status_params.spark_user
+hive_user = status_params.hive_user
+spark_group = status_params.spark_group
+user_group = status_params.user_group
+
+spark_hdfs_user_dir = format("/user/{spark_user}")
+spark_hdfs_user_mode = 0755
+spark_eventlog_dir_mode = 01777
+spark_jar_hdfs_dir = "/iop/apps/4.1.0.0/spark/jars"
+spark_jar_hdfs_dir_mode = 0755
+spark_jar_file_mode = 0444
+spark_jar_src_dir = "/usr/iop/current/spark-historyserver/lib"
+spark_jar_src_file = "spark-assembly.jar"
+
+spark_history_server_pid_file = status_params.spark_history_server_pid_file
+spark_thrift_server_pid_file = status_params.spark_thrift_server_pid_file
+
+spark_history_server_start = format("{spark_home}/sbin/start-history-server.sh")
+spark_history_server_stop = format("{spark_home}/sbin/stop-history-server.sh")
+
+spark_thrift_server_start = format("{spark_home}/sbin/start-thriftserver.sh")
+spark_thrift_server_stop = format("{spark_home}/sbin/stop-thriftserver.sh")
+
+spark_submit_cmd = format("{spark_home}/bin/spark-submit")
+spark_smoke_example = "org.apache.spark.examples.SparkPi"
+spark_service_check_cmd = format(
+  "{spark_submit_cmd} --class {spark_smoke_example}  --master yarn-cluster  --num-executors 1 --driver-memory 256m  --executor-memory 256m   --executor-cores 1  {spark_home}/lib/spark-examples*.jar 1")
+
+spark_jobhistoryserver_hosts = default("/clusterHostInfo/spark_jobhistoryserver_hosts", [])
+spark_thriftserver_hosts = default("/clusterHostInfo/spark_thriftserver_hosts", [])
+namenode_hosts = default("/clusterHostInfo/namenode_host", [])
+has_namenode = not len(namenode_hosts) == 0
+
+if len(spark_jobhistoryserver_hosts) > 0:
+  spark_history_server_host = spark_jobhistoryserver_hosts[0]
+else:
+  spark_history_server_host = "localhost"
+
+if len(spark_thriftserver_hosts) > 0:
+  spark_thrift_server_host = spark_thriftserver_hosts[0]
+else:
+  spark_thrift_server_host = "localhost"
+# spark-defaults params
+if has_namenode:
+  namenode_host = str(namenode_hosts[0])
+else:
+  namenode_host = "localhost"
+
+hadoop_fs_defaultfs = config['configurations']['core-site']['fs.defaultFS']
+spark_eventlog_dir_default=hadoop_fs_defaultfs + config['configurations']['spark-defaults']['spark.eventLog.dir']
+spark_yarn_jar_default=hadoop_fs_defaultfs + '/iop/apps/4.1.0.0/spark/jars/spark-assembly.jar'
+
+spark_yarn_applicationMaster_waitTries = default(
+  "/configurations/spark-defaults/spark.yarn.applicationMaster.waitTries", '10')
+spark_yarn_submit_file_replication = default("/configurations/spark-defaults/spark.yarn.submit.file.replication", '3')
+spark_yarn_preserve_staging_files = default("/configurations/spark-defaults/spark.yarn.preserve.staging.files", "false")
+spark_yarn_scheduler_heartbeat_interval = default(
+  "/configurations/spark-defaults/spark.yarn.scheduler.heartbeat.interval-ms", "5000")
+spark_yarn_queue = default("/configurations/spark-defaults/spark.yarn.queue", "default")
+spark_yarn_containerLauncherMaxThreads = default(
+  "/configurations/spark-defaults/spark.yarn.containerLauncherMaxThreads", "25")
+spark_yarn_max_executor_failures = default("/configurations/spark-defaults/spark.yarn.max.executor.failures", "3")
+spark_yarn_executor_memoryOverhead = default("/configurations/spark-defaults/spark.yarn.executor.memoryOverhead", "384")
+spark_yarn_driver_memoryOverhead = default("/configurations/spark-defaults/spark.yarn.driver.memoryOverhead", "384")
+spark_history_ui_port = default("/configurations/spark-defaults/spark.history.ui.port", "18080")
+spark_thriftserver_port = default("/configurations/spark-env/spark_thriftserver_port", "10002")
+spark_eventlog_enabled = default("/configurations/spark-defaults/spark.eventLog.enabled", "true")
+spark_eventlog_dir = default("/configurations/spark-defaults/spark.eventLog.dir", spark_eventlog_dir_default)
+spark_yarn_jar = default("/configurations/spark-defaults/spark.yarn.jar", spark_yarn_jar_default)
+
+# add the properties that cannot be configured thru UI
+spark_conf_properties_map = dict(config['configurations']['spark-defaults'])
+spark_conf_properties_map["spark.yarn.historyServer.address"] = spark_history_server_host + ":" + str(spark_history_ui_port)
+spark_conf_properties_map["spark.yarn.jar"] = spark_yarn_jar
+spark_conf_properties_map["spark.eventLog.dir"] = spark_eventlog_dir_default
+
+spark_env_sh = config['configurations']['spark-env']['content']
+spark_log4j = config['configurations']['spark-log4j']['content']
+#spark_metrics_properties = config['configurations']['spark-metrics-properties']['content']
+spark_javaopts_properties = config['configurations']['spark-javaopts-properties']['content']
+hive_server_host = default("/clusterHostInfo/hive_server_host", [])
+is_hive_installed = not len(hive_server_host) == 0
+
+iop_full_version = get_iop_version()
+
+spark_driver_extraJavaOptions = str(config['configurations']['spark-defaults']['spark.driver.extraJavaOptions'])
+if spark_driver_extraJavaOptions.find('-Diop.version') == -1:
+  spark_driver_extraJavaOptions = spark_driver_extraJavaOptions + ' -Diop.version=' + str(iop_full_version)
+
+spark_yarn_am_extraJavaOptions = str(config['configurations']['spark-defaults']['spark.yarn.am.extraJavaOptions'])
+if spark_yarn_am_extraJavaOptions.find('-Diop.version') == -1:
+  spark_yarn_am_extraJavaOptions = spark_yarn_am_extraJavaOptions + ' -Diop.version=' + str(iop_full_version)
+
+spark_javaopts_properties = str(spark_javaopts_properties)
+if spark_javaopts_properties.find('-Diop.version') == -1:
+  spark_javaopts_properties = spark_javaopts_properties+ ' -Diop.version=' + str(iop_full_version)
+
+security_enabled = status_params.security_enabled
+kinit_path_local = functions.get_kinit_path()
+spark_kerberos_keytab =  config['configurations']['spark-defaults']['spark.history.kerberos.keytab']
+spark_kerberos_principal =  config['configurations']['spark-defaults']['spark.history.kerberos.principal']
+if security_enabled:
+  spark_principal = spark_kerberos_principal.replace('_HOST',spark_history_server_host.lower())
+# for create_hdfs_directory
+
+# To create hdfs directory
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+kinit_path_local = functions.get_kinit_path()
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
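
The functools.partial call above pre-binds the arguments that are identical for every HdfsResource invocation, so call sites only pass what differs. A generic sketch of the same idea follows; the resource function and sample values are stand-ins, not the real HdfsResource.

    import functools

    def create_hdfs_resource(path, user, keytab, action):
        # Stand-in for the real HdfsResource; just shows which arguments arrive.
        return {"path": path, "user": user, "keytab": keytab, "action": action}

    # Bind the arguments that never change between call sites.
    HdfsResourceWithDefaults = functools.partial(
        create_hdfs_resource,
        user="hdfs",
        keytab="/etc/security/keytabs/hdfs.headless.keytab")

    # Call sites now only pass what differs.
    print HdfsResourceWithDefaults("/user/spark", action="create_on_execute")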

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/service_check.py
new file mode 100755
index 0000000..4c9ea4a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/service_check.py
@@ -0,0 +1,78 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+import subprocess
+import time
+
+class SparkServiceCheck(Script):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+
+    # smoke_cmd = params.spark_service_check_cmd
+    # code, output = shell.call(smoke_cmd, timeout=100)
+    # if code == 0:
+    #   Logger.info('Spark-on-Yarn Job submitted successfully')
+    # else:
+    #   Logger.info('Spark-on-Yarn Job cannot be submitted')
+    #   raise ComponentIsNotRunning()
+
+    command = "curl"
+    httpGssnegotiate = "--negotiate"
+    userpswd = "-u:"
+    insecure = "-k"
+    silent = "-s"
+    out = "-o /dev/null"
+    head = "-w'%{http_code}'"
+    url = 'http://' + params.spark_history_server_host + ':' + str(params.spark_history_ui_port)
+
+    command_with_flags = [command, silent, out, head, httpGssnegotiate, userpswd, insecure, url]
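+    # Builds: curl -s -o /dev/null -w'%{http_code}' --negotiate -u: -k <url>
+    # Only the HTTP status code reaches stdout; the retry loop below looks for '200' in it.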
+
+    is_running = False
+    for i in range(1,11):
+      proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+      Logger.info("Try %d, command: %s" % (i, " ".join(command_with_flags)))
+      (stdout, stderr) = proc.communicate()
+      response = stdout
+      if '200' in response:
+        is_running = True
+        Logger.info('Spark Job History Server up and running')
+        break
+      Logger.info("Response: %s" % str(response))
+      time.sleep(5)
+
+    if not is_running:
+      Logger.info('Spark Job History Server not running.')
+      raise ComponentIsNotRunning()
+
+
+
+    #command_with_flags = [command, silent, out, head, httpGssnegotiate, userpswd, insecure, url]
+    # proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    # (stdout, stderr) = proc.communicate()
+    # response = stdout
+    # if '200' in response:
+    #   Logger.info('Spark Job History Server up and running')
+    # else:
+    #   Logger.info('Spark Job History Server not running.')
+    #   raise ComponentIsNotRunning()
+
+if __name__ == "__main__":
+  SparkServiceCheck().execute()
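
The poll-and-retry loop above is a common Ambari service-check pattern. Below is a generic standalone sketch of the same idea using only the standard library; the URL, attempt count and delay are illustrative defaults, and it does not perform the Kerberos negotiation that the curl-based check supports.

    import time
    import urllib2

    def wait_for_http_ok(url, attempts=10, delay=5):
        # Poll a URL until it answers with HTTP 200 or the attempts run out.
        for _ in range(attempts):
            try:
                if urllib2.urlopen(url, timeout=10).getcode() == 200:
                    return True
            except urllib2.URLError:
                pass
            time.sleep(delay)
        return False

    # Example (host/port are assumptions):
    # wait_for_http_ok("http://localhost:18080")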


[11/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_206.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_206.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_206.py
new file mode 100755
index 0000000..8798628
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_206.py
@@ -0,0 +1,2006 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import re
+import os
+import sys
+import socket
+
+from math import ceil, floor
+
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.mounted_dirs_helper import get_mounts_with_multiple_data_dirs
+
+from stack_advisor import DefaultStackAdvisor
+
+
+class HDP206StackAdvisor(DefaultStackAdvisor):
+
+  def __init__(self):
+    super(HDP206StackAdvisor, self).__init__()
+    Logger.initialize_logger()
+
+  def getComponentLayoutValidations(self, services, hosts):
+    """Returns array of Validation objects about issues with hostnames components assigned to"""
+    items = super(HDP206StackAdvisor, self).getComponentLayoutValidations(services, hosts)
+
+    # Validating NAMENODE and SECONDARY_NAMENODE are on different hosts if possible
+    # Use a set for fast lookup
+    hostsSet =  set(super(HDP206StackAdvisor, self).getActiveHosts([host["Hosts"] for host in hosts["items"]]))  #[host["Hosts"]["host_name"] for host in hosts["items"]]
+    hostsCount = len(hostsSet)
+
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item for sublist in componentsListList for item in sublist]
+    nameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "NAMENODE"]
+    secondaryNameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SECONDARY_NAMENODE"]
+
+    # Validating cardinality
+    for component in componentsList:
+      if component["StackServiceComponents"]["cardinality"] is not None:
+         componentName = component["StackServiceComponents"]["component_name"]
+         componentDisplayName = component["StackServiceComponents"]["display_name"]
+         componentHosts = []
+         if component["StackServiceComponents"]["hostnames"] is not None:
+           componentHosts = [componentHost for componentHost in component["StackServiceComponents"]["hostnames"] if componentHost in hostsSet]
+         componentHostsCount = len(componentHosts)
+         cardinality = str(component["StackServiceComponents"]["cardinality"])
+         # cardinality types: null, 1+, 1-2, 1, ALL
+         message = None
+         if "+" in cardinality:
+           hostsMin = int(cardinality[:-1])
+           if componentHostsCount < hostsMin:
+             message = "At least {0} {1} components should be installed in cluster.".format(hostsMin, componentDisplayName)
+         elif "-" in cardinality:
+           nums = cardinality.split("-")
+           hostsMin = int(nums[0])
+           hostsMax = int(nums[1])
+           if componentHostsCount > hostsMax or componentHostsCount < hostsMin:
+             message = "Between {0} and {1} {2} components should be installed in cluster.".format(hostsMin, hostsMax, componentDisplayName)
+         elif "ALL" == cardinality:
+           if componentHostsCount != hostsCount:
+             message = "{0} component should be installed on all hosts in cluster.".format(componentDisplayName)
+         else:
+           if componentHostsCount != int(cardinality):
+             message = "Exactly {0} {1} components should be installed in cluster.".format(int(cardinality), componentDisplayName)
+
+         if message is not None:
+           items.append({"type": 'host-component', "level": 'ERROR', "message": message, "component-name": componentName})
+
+    # Validating host-usage
+    usedHostsListList = [component["StackServiceComponents"]["hostnames"] for component in componentsList if not self.isComponentNotValuable(component)]
+    usedHostsList = [item for sublist in usedHostsListList for item in sublist]
+    nonUsedHostsList = [item for item in hostsSet if item not in usedHostsList]
+    for host in nonUsedHostsList:
+      items.append( { "type": 'host-component', "level": 'ERROR', "message": 'Host is not used', "host": str(host) } )
+
+    return items
+
+  def getServiceConfigurationRecommenderDict(self):
+    return {
+      "YARN": self.recommendYARNConfigurations,
+      "MAPREDUCE2": self.recommendMapReduce2Configurations,
+      "HDFS": self.recommendHDFSConfigurations,
+      "HBASE": self.recommendHbaseConfigurations,
+      "STORM": self.recommendStormConfigurations,
+      "AMBARI_METRICS": self.recommendAmsConfigurations,
+      "RANGER": self.recommendRangerConfigurations
+    }
+
+  def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
+    putYarnProperty = self.putProperty(configurations, "yarn-site", services)
+    putYarnPropertyAttribute = self.putPropertyAttribute(configurations, "yarn-site")
+    putYarnEnvProperty = self.putProperty(configurations, "yarn-env", services)
+    nodemanagerMinRam = 1048576 # 1TB in mb
+    if "referenceNodeManagerHost" in clusterData:
+      nodemanagerMinRam = min(clusterData["referenceNodeManagerHost"]["total_mem"]/1024, nodemanagerMinRam)
+    putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(min(clusterData['containers'] * clusterData['ramPerContainer'], nodemanagerMinRam))))
+    putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))
+    putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"]))
+    putYarnEnvProperty('min_user_id', self.get_system_min_uid())
+
+    sc_queue_name = self.recommendYarnQueue(services, "yarn-env", "service_check.queue.name")
+    if sc_queue_name is not None:
+      putYarnEnvProperty("service_check.queue.name", sc_queue_name)
+
+    containerExecutorGroup = 'hadoop'
+    if 'cluster-env' in services['configurations'] and 'user_group' in services['configurations']['cluster-env']['properties']:
+      containerExecutorGroup = services['configurations']['cluster-env']['properties']['user_group']
+    putYarnProperty("yarn.nodemanager.linux-container-executor.group", containerExecutorGroup)
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if "TEZ" in servicesList:
+        ambari_user = self.getAmbariUser(services)
+        ambariHostName = socket.getfqdn()
+        putYarnProperty("yarn.timeline-service.http-authentication.proxyuser.{0}.hosts".format(ambari_user), ambariHostName)
+        putYarnProperty("yarn.timeline-service.http-authentication.proxyuser.{0}.groups".format(ambari_user), "*")
+        old_ambari_user = self.getOldAmbariUser(services)
+        if old_ambari_user is not None:
+            putYarnPropertyAttribute("yarn.timeline-service.http-authentication.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true')
+            putYarnPropertyAttribute("yarn.timeline-service.http-authentication.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true')
+
+
+  def recommendMapReduce2Configurations(self, configurations, clusterData, services, hosts):
+    putMapredProperty = self.putProperty(configurations, "mapred-site", services)
+    putMapredProperty('yarn.app.mapreduce.am.resource.mb', int(clusterData['amMemory']))
+    putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(round(0.8 * clusterData['amMemory']))) + "m")
+    putMapredProperty('mapreduce.map.memory.mb', clusterData['mapMemory'])
+    putMapredProperty('mapreduce.reduce.memory.mb', int(clusterData['reduceMemory']))
+    putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
+    putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
+    putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
+    mr_queue = self.recommendYarnQueue(services, "mapred-site", "mapreduce.job.queuename")
+    if mr_queue is not None:
+      putMapredProperty("mapreduce.job.queuename", mr_queue)
+
+  def getAmbariUser(self, services):
+    ambari_user = services['ambari-server-properties']['ambari-server.user']
+    if "cluster-env" in services["configurations"] \
+          and "ambari_principal_name" in services["configurations"]["cluster-env"]["properties"] \
+                and "security_enabled" in services["configurations"]["cluster-env"]["properties"] \
+                    and services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true":
+      ambari_user = services["configurations"]["cluster-env"]["properties"]["ambari_principal_name"]
+      ambari_user = ambari_user.split('@')[0]
+    return ambari_user
+
+  def getOldAmbariUser(self, services):
+    ambari_user = None
+    if "cluster-env" in services["configurations"]:
+      if "security_enabled" in services["configurations"]["cluster-env"]["properties"] \
+              and services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true":
+         ambari_user = services['ambari-server-properties']['ambari-server.user']
+      elif "ambari_principal_name" in services["configurations"]["cluster-env"]["properties"]:
+         ambari_user = services["configurations"]["cluster-env"]["properties"]["ambari_principal_name"]
+         ambari_user = ambari_user.split('@')[0]
+    return ambari_user
+
+  def recommendAmbariProxyUsersForHDFS(self, services, servicesList, putCoreSiteProperty, putCoreSitePropertyAttribute):
+      if "HDFS" in servicesList:
+          ambari_user = self.getAmbariUser(services)
+          ambariHostName = socket.getfqdn()
+          putCoreSiteProperty("hadoop.proxyuser.{0}.hosts".format(ambari_user), ambariHostName)
+          putCoreSiteProperty("hadoop.proxyuser.{0}.groups".format(ambari_user), "*")
+          old_ambari_user = self.getOldAmbariUser(services)
+          if old_ambari_user is not None:
+            putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true')
+            putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true')
+
+  def recommendHadoopProxyUsers (self, configurations, services, hosts):
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    users = {}
+
+    if 'forced-configurations' not in services:
+      services["forced-configurations"] = []
+
+    if "HDFS" in servicesList:
+      hdfs_user = None
+      if "hadoop-env" in services["configurations"] and "hdfs_user" in services["configurations"]["hadoop-env"]["properties"]:
+        hdfs_user = services["configurations"]["hadoop-env"]["properties"]["hdfs_user"]
+        if not hdfs_user in users and hdfs_user is not None:
+          users[hdfs_user] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "hadoop-env", "propertyName" : "hdfs_user"}
+
+    if "OOZIE" in servicesList:
+      oozie_user = None
+      if "oozie-env" in services["configurations"] and "oozie_user" in services["configurations"]["oozie-env"]["properties"]:
+        oozie_user = services["configurations"]["oozie-env"]["properties"]["oozie_user"]
+        oozieServerrHosts = self.getHostsWithComponent("OOZIE", "OOZIE_SERVER", services, hosts)
+        if oozieServerrHosts is not None:
+          oozieServerHostsNameList = []
+          for oozieServerHost in oozieServerrHosts:
+            oozieServerHostsNameList.append(oozieServerHost["Hosts"]["host_name"])
+          oozieServerHostsNames = ",".join(oozieServerHostsNameList)
+          if not oozie_user in users and oozie_user is not None:
+            users[oozie_user] = {"propertyHosts" : oozieServerHostsNames,"propertyGroups" : "*", "config" : "oozie-env", "propertyName" : "oozie_user"}
+
+    hive_user = None
+    if "HIVE" in servicesList:
+      webhcat_user = None
+      if "hive-env" in services["configurations"] and "hive_user" in services["configurations"]["hive-env"]["properties"] \
+              and "webhcat_user" in services["configurations"]["hive-env"]["properties"]:
+        hive_user = services["configurations"]["hive-env"]["properties"]["hive_user"]
+        webhcat_user = services["configurations"]["hive-env"]["properties"]["webhcat_user"]
+        hiveServerHosts = self.getHostsWithComponent("HIVE", "HIVE_SERVER", services, hosts)
+        hiveServerInteractiveHosts = self.getHostsWithComponent("HIVE", "HIVE_SERVER_INTERACTIVE", services, hosts)
+        webHcatServerHosts = self.getHostsWithComponent("HIVE", "WEBHCAT_SERVER", services, hosts)
+
+        if hiveServerHosts is not None:
+          hiveServerHostsNameList = []
+          for hiveServerHost in hiveServerHosts:
+            hiveServerHostsNameList.append(hiveServerHost["Hosts"]["host_name"])
+          # Append Hive Server Interactive host as well, as it is Hive2/HiveServer2 component.
+          if hiveServerInteractiveHosts:
+            for hiveServerInteractiveHost in hiveServerInteractiveHosts:
+              hiveServerInteractiveHostName = hiveServerInteractiveHost["Hosts"]["host_name"]
+              if hiveServerInteractiveHostName not in hiveServerHostsNameList:
+                hiveServerHostsNameList.append(hiveServerInteractiveHostName)
+                Logger.info("Appended (if not exiting), Hive Server Interactive Host : '{0}', to Hive Server Host List : '{1}'".format(hiveServerInteractiveHostName, hiveServerHostsNameList))
+
+          hiveServerHostsNames = ",".join(hiveServerHostsNameList)  # includes Hive Server interactive host also.
+          Logger.info("Hive Server and Hive Server Interactive (if enabled) Host List : {0}".format(hiveServerHostsNameList))
+          if not hive_user in users and hive_user is not None:
+            users[hive_user] = {"propertyHosts" : hiveServerHostsNames,"propertyGroups" : "*", "config" : "hive-env", "propertyName" : "hive_user"}
+
+        if webHcatServerHosts is not None:
+          webHcatServerHostsNameList = []
+          for webHcatServerHost in webHcatServerHosts:
+            webHcatServerHostsNameList.append(webHcatServerHost["Hosts"]["host_name"])
+          webHcatServerHostsNames = ",".join(webHcatServerHostsNameList)
+          if not webhcat_user in users and webhcat_user is not None:
+            users[webhcat_user] = {"propertyHosts" : webHcatServerHostsNames,"propertyGroups" : "*", "config" : "hive-env", "propertyName" : "webhcat_user"}
+
+    if "YARN" in servicesList:
+      yarn_user = None
+      if "yarn-env" in services["configurations"] and "yarn_user" in services["configurations"]["yarn-env"]["properties"]:
+        yarn_user = services["configurations"]["yarn-env"]["properties"]["yarn_user"]
+        rmHosts = self.getHostsWithComponent("YARN", "RESOURCEMANAGER", services, hosts)
+
+        if len(rmHosts) > 1:
+          rmHostsNameList = []
+          for rmHost in rmHosts:
+            rmHostsNameList.append(rmHost["Hosts"]["host_name"])
+          rmHostsNames = ",".join(rmHostsNameList)
+          if not yarn_user in users and yarn_user is not None:
+            users[yarn_user] = {"propertyHosts" : rmHostsNames, "config" : "yarn-env", "propertyName" : "yarn_user"}
+
+
+    if "FALCON" in servicesList:
+      falconUser = None
+      if "falcon-env" in services["configurations"] and "falcon_user" in services["configurations"]["falcon-env"]["properties"]:
+        falconUser = services["configurations"]["falcon-env"]["properties"]["falcon_user"]
+        if not falconUser in users and falconUser is not None:
+          users[falconUser] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "falcon-env", "propertyName" : "falcon_user"}
+
+    if "SPARK" in servicesList:
+      livyUser = None
+      if "livy-env" in services["configurations"] and "livy_user" in services["configurations"]["livy-env"]["properties"]:
+        livyUser = services["configurations"]["livy-env"]["properties"]["livy_user"]
+        if not livyUser in users and livyUser is not None:
+          users[livyUser] = {"propertyHosts" : "*","propertyGroups" : "*", "config" : "livy-env", "propertyName" : "livy_user"}
+
+    putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
+    putCoreSitePropertyAttribute = self.putPropertyAttribute(configurations, "core-site")
+
+    for user_name, user_properties in users.iteritems():
+      if hive_user and hive_user == user_name:
+        if "propertyHosts" in user_properties:
+          services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(hive_user)})
+      # Add properties "hadoop.proxyuser.*.hosts", "hadoop.proxyuser.*.groups" to core-site for all users
+      putCoreSiteProperty("hadoop.proxyuser.{0}.hosts".format(user_name) , user_properties["propertyHosts"])
+      Logger.info("Updated hadoop.proxyuser.{0}.hosts as : {1}".format(hive_user, user_properties["propertyHosts"]))
+      if "propertyGroups" in user_properties:
+        putCoreSiteProperty("hadoop.proxyuser.{0}.groups".format(user_name) , user_properties["propertyGroups"])
+
+      # Remove old properties if user was renamed
+      userOldValue = getOldValue(self, services, user_properties["config"], user_properties["propertyName"])
+      if userOldValue is not None and userOldValue != user_name:
+        putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.hosts".format(userOldValue), 'delete', 'true')
+        services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(userOldValue)})
+        services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.hosts".format(user_name)})
+
+        if "propertyGroups" in user_properties:
+          putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(userOldValue), 'delete', 'true')
+          services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(userOldValue)})
+          services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(user_name)})
+
+    self.recommendAmbariProxyUsersForHDFS(services, servicesList, putCoreSiteProperty, putCoreSitePropertyAttribute)
+
+  def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
+    putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
+    putHDFSSiteProperty = self.putProperty(configurations, "hdfs-site", services)
+    putHDFSSitePropertyAttributes = self.putPropertyAttribute(configurations, "hdfs-site")
+    putHDFSProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
+    putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
+    putHDFSProperty('namenode_opt_newsize', max(int(clusterData['totalAvailableRam'] / 8), 128))
+    putHDFSProperty = self.putProperty(configurations, "hadoop-env", services)
+    putHDFSProperty('namenode_opt_maxnewsize', max(int(clusterData['totalAvailableRam'] / 8), 256))
+
+    # Check if NN HA is enabled and recommend removing dfs.namenode.rpc-address
+    hdfsSiteProperties = getServicesSiteProperties(services, "hdfs-site")
+    nameServices = None
+    if hdfsSiteProperties and 'dfs.internal.nameservices' in hdfsSiteProperties:
+      nameServices = hdfsSiteProperties['dfs.internal.nameservices']
+    if nameServices is None and hdfsSiteProperties and 'dfs.nameservices' in hdfsSiteProperties:
+      nameServices = hdfsSiteProperties['dfs.nameservices']
+    if nameServices and "dfs.ha.namenodes.%s" % nameServices in hdfsSiteProperties:
+      namenodes = hdfsSiteProperties["dfs.ha.namenodes.%s" % nameServices]
+      if len(namenodes.split(',')) > 1:
+        putHDFSSitePropertyAttributes("dfs.namenode.rpc-address", "delete", "true")
+
+    #Initialize default 'dfs.datanode.data.dir' if needed
+    if (not hdfsSiteProperties) or ('dfs.datanode.data.dir' not in hdfsSiteProperties):
+      dataDirs = '/hadoop/hdfs/data'
+      putHDFSSiteProperty('dfs.datanode.data.dir', dataDirs)
+    else:
+      dataDirs = hdfsSiteProperties['dfs.datanode.data.dir'].split(",")
+
+    # dfs.datanode.du.reserved should be set to 10-15% of volume size
+    # For each host selects maximum size of the volume. Then gets minimum for all hosts.
+    # This ensures that each host will have at least one data dir with available space.
+    reservedSizeRecommendation = 0l #kBytes
+    for host in hosts["items"]:
+      mountPoints = []
+      mountPointDiskAvailableSpace = [] #kBytes
+      for diskInfo in host["Hosts"]["disk_info"]:
+        mountPoints.append(diskInfo["mountpoint"])
+        mountPointDiskAvailableSpace.append(long(diskInfo["size"]))
+
+      maxFreeVolumeSizeForHost = 0l #kBytes
+      for dataDir in dataDirs:
+        mp = getMountPointForDir(dataDir, mountPoints)
+        for i in range(len(mountPoints)):
+          if mp == mountPoints[i]:
+            if mountPointDiskAvailableSpace[i] > maxFreeVolumeSizeForHost:
+              maxFreeVolumeSizeForHost = mountPointDiskAvailableSpace[i]
+
+      if not reservedSizeRecommendation or maxFreeVolumeSizeForHost and maxFreeVolumeSizeForHost < reservedSizeRecommendation:
+        reservedSizeRecommendation = maxFreeVolumeSizeForHost
+
+    if reservedSizeRecommendation:
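+      # reservedSizeRecommendation is in kilobytes here; * 1024 converts to bytes and / 8 reserves ~12.5% of the largest free volume, with a 1 GiB floor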
+      reservedSizeRecommendation = max(reservedSizeRecommendation * 1024 / 8, 1073741824) # At least 1Gb is reserved
+      putHDFSSiteProperty('dfs.datanode.du.reserved', reservedSizeRecommendation) #Bytes
+
+    # recommendations for "hadoop.proxyuser.*.hosts", "hadoop.proxyuser.*.groups" properties in core-site
+    self.recommendHadoopProxyUsers(configurations, services, hosts)
+
+  def recommendHbaseConfigurations(self, configurations, clusterData, services, hosts):
+    # recommendations for HBase env config
+
+    # If cluster size is < 100, hbase master heap = 2G
+    # else If cluster size is < 500, hbase master heap = 4G
+    # else hbase master heap = 8G
+    # for small test clusters use 1 gb
+    hostsCount = 0
+    if hosts and "items" in hosts:
+      hostsCount = len(hosts["items"])
+
+    hbaseMasterRam = {
+      hostsCount < 20: 1,
+      20 <= hostsCount < 100: 2,
+      100 <= hostsCount < 500: 4,
+      500 <= hostsCount: 8
+    }[True]
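+    # The keys are boolean range checks on hostsCount; exactly one is True, so [True] selects the matching master heap size (in GB)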
+
+    putHbaseProperty = self.putProperty(configurations, "hbase-env", services)
+    putHbaseProperty('hbase_regionserver_heapsize', int(clusterData['hbaseRam']) * 1024)
+    putHbaseProperty('hbase_master_heapsize', hbaseMasterRam * 1024)
+
+    # recommendations for HBase site config
+    putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
+
+    if 'hbase-site' in services['configurations'] and 'hbase.superuser' in services['configurations']['hbase-site']['properties'] \
+      and 'hbase-env' in services['configurations'] and 'hbase_user' in services['configurations']['hbase-env']['properties'] \
+      and services['configurations']['hbase-env']['properties']['hbase_user'] != services['configurations']['hbase-site']['properties']['hbase.superuser']:
+      putHbaseSiteProperty("hbase.superuser", services['configurations']['hbase-env']['properties']['hbase_user'])
+
+
+  def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
+
+    putRangerAdminProperty = self.putProperty(configurations, "admin-properties", services)
+
+    # Build policymgr_external_url
+    protocol = 'http'
+    ranger_admin_host = 'localhost'
+    port = '6080'
+
+    # Check if http is disabled. For HDP-2.3 this can be checked in ranger-admin-site/ranger.service.http.enabled
+    # For Ranger-0.4.0 this can be checked in ranger-site/http.enabled
+    if ('ranger-site' in services['configurations'] and 'http.enabled' in services['configurations']['ranger-site']['properties'] \
+      and services['configurations']['ranger-site']['properties']['http.enabled'].lower() == 'false') or \
+      ('ranger-admin-site' in services['configurations'] and 'ranger.service.http.enabled' in services['configurations']['ranger-admin-site']['properties'] \
+      and services['configurations']['ranger-admin-site']['properties']['ranger.service.http.enabled'].lower() == 'false'):
+      # HTTPS protocol is used
+      protocol = 'https'
+      # Starting Ranger-0.5.0.2.3 port stored in ranger-admin-site ranger.service.https.port
+      if 'ranger-admin-site' in services['configurations'] and \
+          'ranger.service.https.port' in services['configurations']['ranger-admin-site']['properties']:
+        port = services['configurations']['ranger-admin-site']['properties']['ranger.service.https.port']
+      # In Ranger-0.4.0 port stored in ranger-site https.service.port
+      elif 'ranger-site' in services['configurations'] and \
+          'https.service.port' in services['configurations']['ranger-site']['properties']:
+        port = services['configurations']['ranger-site']['properties']['https.service.port']
+    else:
+      # HTTP protocol is used
+      # Starting Ranger-0.5.0.2.3 port stored in ranger-admin-site ranger.service.http.port
+      if 'ranger-admin-site' in services['configurations'] and \
+          'ranger.service.http.port' in services['configurations']['ranger-admin-site']['properties']:
+        port = services['configurations']['ranger-admin-site']['properties']['ranger.service.http.port']
+      # In Ranger-0.4.0 port stored in ranger-site http.service.port
+      elif 'ranger-site' in services['configurations'] and \
+          'http.service.port' in services['configurations']['ranger-site']['properties']:
+        port = services['configurations']['ranger-site']['properties']['http.service.port']
+
+    ranger_admin_hosts = self.getComponentHostNames(services, "RANGER", "RANGER_ADMIN")
+    if ranger_admin_hosts:
+      if len(ranger_admin_hosts) > 1 \
+        and services['configurations'] \
+        and 'admin-properties' in services['configurations'] and 'policymgr_external_url' in services['configurations']['admin-properties']['properties'] \
+        and services['configurations']['admin-properties']['properties']['policymgr_external_url'] \
+        and services['configurations']['admin-properties']['properties']['policymgr_external_url'].strip():
+
+        # in case of HA deployment keep the policymgr_external_url specified in the config
+        policymgr_external_url = services['configurations']['admin-properties']['properties']['policymgr_external_url']
+      else:
+
+        ranger_admin_host = ranger_admin_hosts[0]
+        policymgr_external_url = "%s://%s:%s" % (protocol, ranger_admin_host, port)
+
+      putRangerAdminProperty('policymgr_external_url', policymgr_external_url)
+
+    rangerServiceVersion = [service['StackServices']['service_version'] for service in services["services"] if service['StackServices']['service_name'] == 'RANGER'][0]
+    if rangerServiceVersion == '0.4.0':
+      # Recommend ldap settings based on ambari.properties configuration
+      # If 'ambari.ldap.isConfigured' == true
+      # For Ranger version 0.4.0
+      if 'ambari-server-properties' in services and \
+      'ambari.ldap.isConfigured' in services['ambari-server-properties'] and \
+        services['ambari-server-properties']['ambari.ldap.isConfigured'].lower() == "true":
+        putUserSyncProperty = self.putProperty(configurations, "usersync-properties", services)
+        serverProperties = services['ambari-server-properties']
+        if 'authentication.ldap.managerDn' in serverProperties:
+          putUserSyncProperty('SYNC_LDAP_BIND_DN', serverProperties['authentication.ldap.managerDn'])
+        if 'authentication.ldap.primaryUrl' in serverProperties:
+          ldap_protocol =  'ldap://'
+          if 'authentication.ldap.useSSL' in serverProperties and serverProperties['authentication.ldap.useSSL'] == 'true':
+            ldap_protocol =  'ldaps://'
+          ldapUrl = ldap_protocol + serverProperties['authentication.ldap.primaryUrl'] if serverProperties['authentication.ldap.primaryUrl'] else serverProperties['authentication.ldap.primaryUrl']
+          putUserSyncProperty('SYNC_LDAP_URL', ldapUrl)
+        if 'authentication.ldap.userObjectClass' in serverProperties:
+          putUserSyncProperty('SYNC_LDAP_USER_OBJECT_CLASS', serverProperties['authentication.ldap.userObjectClass'])
+        if 'authentication.ldap.usernameAttribute' in serverProperties:
+          putUserSyncProperty('SYNC_LDAP_USER_NAME_ATTRIBUTE', serverProperties['authentication.ldap.usernameAttribute'])
+
+
+      # Set Ranger Admin Authentication method
+      if 'admin-properties' in services['configurations'] and 'usersync-properties' in services['configurations'] and \
+          'SYNC_SOURCE' in services['configurations']['usersync-properties']['properties']:
+        rangerUserSyncSource = services['configurations']['usersync-properties']['properties']['SYNC_SOURCE']
+        authenticationMethod = rangerUserSyncSource.upper()
+        if authenticationMethod != 'FILE':
+          putRangerAdminProperty('authentication_method', authenticationMethod)
+
+      # Recommend xasecure.audit.destination.hdfs.dir
+      # For Ranger version 0.4.0
+      servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+      putRangerEnvProperty = self.putProperty(configurations, "ranger-env", services)
+      include_hdfs = "HDFS" in servicesList
+      if include_hdfs:
+        if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']):
+          default_fs = services['configurations']['core-site']['properties']['fs.defaultFS']
+          default_fs += '/ranger/audit/%app-type%/%time:yyyyMMdd%'
+          putRangerEnvProperty('xasecure.audit.destination.hdfs.dir', default_fs)
+
+      # Recommend Ranger Audit properties for ranger supported services
+      # For Ranger version 0.4.0
+      ranger_services = [
+        {'service_name': 'HDFS', 'audit_file': 'ranger-hdfs-plugin-properties'},
+        {'service_name': 'HBASE', 'audit_file': 'ranger-hbase-plugin-properties'},
+        {'service_name': 'HIVE', 'audit_file': 'ranger-hive-plugin-properties'},
+        {'service_name': 'KNOX', 'audit_file': 'ranger-knox-plugin-properties'},
+        {'service_name': 'STORM', 'audit_file': 'ranger-storm-plugin-properties'}
+      ]
+
+      for ranger_service in ranger_services:
+        if ranger_service['service_name'] in servicesList:
+          component_audit_file = ranger_service['audit_file']
+          if component_audit_file in services["configurations"]:
+            ranger_audit_dict = [
+              {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.db', 'target_configname': 'XAAUDIT.DB.IS_ENABLED'},
+              {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs', 'target_configname': 'XAAUDIT.HDFS.IS_ENABLED'},
+              {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs.dir', 'target_configname': 'XAAUDIT.HDFS.DESTINATION_DIRECTORY'}
+            ]
+            putRangerAuditProperty = self.putProperty(configurations, component_audit_file, services)
+
+            for item in ranger_audit_dict:
+              if item['filename'] in services["configurations"] and item['configname'] in  services["configurations"][item['filename']]["properties"]:
+                if item['filename'] in configurations and item['configname'] in  configurations[item['filename']]["properties"]:
+                  rangerAuditProperty = configurations[item['filename']]["properties"][item['configname']]
+                else:
+                  rangerAuditProperty = services["configurations"][item['filename']]["properties"][item['configname']]
+                putRangerAuditProperty(item['target_configname'], rangerAuditProperty)
+
+
+  def getAmsMemoryRecommendation(self, services, hosts):
+    # MB per sink in hbase heapsize
+    HEAP_PER_MASTER_COMPONENT = 50
+    HEAP_PER_SLAVE_COMPONENT = 10
+
+    schMemoryMap = {
+      "HDFS": {
+        "NAMENODE": HEAP_PER_MASTER_COMPONENT,
+        "DATANODE": HEAP_PER_SLAVE_COMPONENT
+      },
+      "YARN": {
+        "RESOURCEMANAGER": HEAP_PER_MASTER_COMPONENT,
+      },
+      "HBASE": {
+        "HBASE_MASTER": HEAP_PER_MASTER_COMPONENT,
+        "HBASE_REGIONSERVER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "ACCUMULO": {
+        "ACCUMULO_MASTER": HEAP_PER_MASTER_COMPONENT,
+        "ACCUMULO_TSERVER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "KAFKA": {
+        "KAFKA_BROKER": HEAP_PER_MASTER_COMPONENT
+      },
+      "FLUME": {
+        "FLUME_HANDLER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "STORM": {
+        "NIMBUS": HEAP_PER_MASTER_COMPONENT,
+      },
+      "AMBARI_METRICS": {
+        "METRICS_COLLECTOR": HEAP_PER_MASTER_COMPONENT,
+        "METRICS_MONITOR": HEAP_PER_SLAVE_COMPONENT
+      }
+    }
+    total_sinks_count = 0
+    # minimum heap size
+    hbase_heapsize = 500
+    for serviceName, componentsDict in schMemoryMap.items():
+      for componentName, multiplier in componentsDict.items():
+        schCount = len(
+          self.getHostsWithComponent(serviceName, componentName, services,
+                                     hosts))
+        hbase_heapsize += int((schCount * multiplier) ** 0.9)
+        total_sinks_count += schCount
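+    # Collector heap is hbase_heapsize/4 once the HBase heap exceeds 2 GB, otherwise a 512 MB default.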
+    collector_heapsize = int(hbase_heapsize/4 if hbase_heapsize > 2048 else 512)
+
+    return round_to_n(collector_heapsize), round_to_n(hbase_heapsize), total_sinks_count
+
+  def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
+    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Storm AMS integration
+    if 'AMBARI_METRICS' in servicesList:
+      putStormSiteProperty('metrics.reporter.register', 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter')
+
+  def recommendAmsConfigurations(self, configurations, clusterData, services, hosts):
+    putAmsEnvProperty = self.putProperty(configurations, "ams-env", services)
+    putAmsHbaseSiteProperty = self.putProperty(configurations, "ams-hbase-site", services)
+    putAmsSiteProperty = self.putProperty(configurations, "ams-site", services)
+    putHbaseEnvProperty = self.putProperty(configurations, "ams-hbase-env", services)
+    putGrafanaProperty = self.putProperty(configurations, "ams-grafana-env", services)
+    putGrafanaPropertyAttribute = self.putPropertyAttribute(configurations, "ams-grafana-env")
+
+    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+
+    if 'cluster-env' in services['configurations'] and \
+        'metrics_collector_vip_host' in services['configurations']['cluster-env']['properties']:
+      metric_collector_host = services['configurations']['cluster-env']['properties']['metrics_collector_vip_host']
+    else:
+      metric_collector_host = 'localhost' if len(amsCollectorHosts) == 0 else amsCollectorHosts[0]
+
+    putAmsSiteProperty("timeline.metrics.service.webapp.address", str(metric_collector_host) + ":6188")
+
+    log_dir = "/var/log/ambari-metrics-collector"
+    if "ams-env" in services["configurations"]:
+      if "metrics_collector_log_dir" in services["configurations"]["ams-env"]["properties"]:
+        log_dir = services["configurations"]["ams-env"]["properties"]["metrics_collector_log_dir"]
+      putHbaseEnvProperty("hbase_log_dir", log_dir)
+
+    defaultFs = 'file:///'
+    if "core-site" in services["configurations"] and \
+      "fs.defaultFS" in services["configurations"]["core-site"]["properties"]:
+      defaultFs = services["configurations"]["core-site"]["properties"]["fs.defaultFS"]
+
+    operatingMode = "embedded"
+    if "ams-site" in services["configurations"]:
+      if "timeline.metrics.service.operation.mode" in services["configurations"]["ams-site"]["properties"]:
+        operatingMode = services["configurations"]["ams-site"]["properties"]["timeline.metrics.service.operation.mode"]
+
+    if operatingMode == "distributed":
+      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'true')
+      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'true')
+    else:
+      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'false')
+      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'false')
+
+    rootDir = "file:///var/lib/ambari-metrics-collector/hbase"
+    tmpDir = "/var/lib/ambari-metrics-collector/hbase-tmp"
+    zk_port_default = []
+    if "ams-hbase-site" in services["configurations"]:
+      if "hbase.rootdir" in services["configurations"]["ams-hbase-site"]["properties"]:
+        rootDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.rootdir"]
+      if "hbase.tmp.dir" in services["configurations"]["ams-hbase-site"]["properties"]:
+        tmpDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.tmp.dir"]
+      if "hbase.zookeeper.property.clientPort" in services["configurations"]["ams-hbase-site"]["properties"]:
+        zk_port_default = services["configurations"]["ams-hbase-site"]["properties"]["hbase.zookeeper.property.clientPort"]
+
+    # Skip recommendation item if default value is present
+    if operatingMode == "distributed" and not "{{zookeeper_clientPort}}" in zk_port_default:
+      zkPort = self.getZKPort(services)
+      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", zkPort)
+    elif operatingMode == "embedded" and not "{{zookeeper_clientPort}}" in zk_port_default:
+      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", "61181")
+
+    mountpoints = ["/"]
+    for collectorHostName in amsCollectorHosts:
+      for host in hosts["items"]:
+        if host["Hosts"]["host_name"] == collectorHostName:
+          mountpoints = self.getPreferredMountPoints(host["Hosts"])
+          break
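+    # If the AMS HBase root dir lives on the local filesystem, rebase it onto the first preferred mount point
+    # (most free space); the tmp dir goes to a second mount point when available so root and tmp don't share disk IO.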
+    isLocalRootDir = rootDir.startswith("file://") or (defaultFs.startswith("file://") and rootDir.startswith("/"))
+    if isLocalRootDir:
+      rootDir = re.sub("^file:///|/", "", rootDir, count=1)
+      rootDir = "file://" + os.path.join(mountpoints[0], rootDir)
+    tmpDir = re.sub("^file:///|/", "", tmpDir, count=1)
+    if len(mountpoints) > 1 and isLocalRootDir:
+      tmpDir = os.path.join(mountpoints[1], tmpDir)
+    else:
+      tmpDir = os.path.join(mountpoints[0], tmpDir)
+    putAmsHbaseSiteProperty("hbase.tmp.dir", tmpDir)
+
+    if operatingMode == "distributed":
+      putAmsHbaseSiteProperty("hbase.rootdir", defaultFs + "/user/ams/hbase")
+
+    if operatingMode == "embedded":
+      if isLocalRootDir:
+        putAmsHbaseSiteProperty("hbase.rootdir", rootDir)
+      else:
+        putAmsHbaseSiteProperty("hbase.rootdir", "file:///var/lib/ambari-metrics-collector/hbase")
+
+    collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
+
+    putAmsEnvProperty("metrics_collector_heapsize", collector_heapsize)
+
+    # blockCache = 0.3, memstore = 0.35, phoenix-server = 0.15, phoenix-client = 0.25
+    putAmsHbaseSiteProperty("hfile.block.cache.size", 0.3)
+    putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 134217728)
+    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.35)
+    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.3)
+
+    if len(amsCollectorHosts) > 1:
+      pass
+    else:
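+      # single collector host: tune AMS HBase and Phoenix settings based on the total number of metric sinks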
+      # blockCache = 0.3, memstore = 0.3, phoenix-server = 0.2, phoenix-client = 0.3
+      if total_sinks_count >= 2000:
+        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
+        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
+        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
+        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
+        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.3)
+        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.25)
+        putAmsHbaseSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 20)
+        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 81920000)
+        putAmsSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 30)
+        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 10000)
+      elif total_sinks_count >= 500:
+        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
+        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
+        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
+        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
+        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 40960000)
+        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 5000)
+      else:
+        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 20480000)
+      pass
+
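+    # roughly one API handler thread per 100 sinks, clamped to the range [20, 50]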
+    metrics_api_handlers = min(50, max(20, int(total_sinks_count / 100)))
+    putAmsSiteProperty("timeline.metrics.service.handler.thread.count", metrics_api_handlers)
+
+    # Distributed mode heap size
+    if operatingMode == "distributed":
+      hbase_heapsize = max(hbase_heapsize, 768)
+      putHbaseEnvProperty("hbase_master_heapsize", "512")
+      putHbaseEnvProperty("hbase_master_xmn_size", "102") #20% of 512 heap size
+      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_heapsize)
+      putHbaseEnvProperty("regionserver_xmn_size", round_to_n(0.15*hbase_heapsize,64))
+    else:
+      # Embedded mode heap size : master + regionserver
+      hbase_rs_heapsize = 768
+      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_rs_heapsize)
+      putHbaseEnvProperty("hbase_master_heapsize", hbase_heapsize)
+      putHbaseEnvProperty("hbase_master_xmn_size", round_to_n(0.15*(hbase_heapsize+hbase_rs_heapsize),64))
+
+    # If no local DN in distributed mode
+    if operatingMode == "distributed":
+      dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
+      # calls from the Kerberos wizard send only the service being affected,
+      # so dn_hosts may be None even though amsCollectorHosts is not
+      if dn_hosts and len(dn_hosts) > 0:
+        if set(amsCollectorHosts).intersection(dn_hosts):
+          collector_cohosted_with_dn = "true"
+        else:
+          collector_cohosted_with_dn = "false"
+        putAmsHbaseSiteProperty("dfs.client.read.shortcircuit", collector_cohosted_with_dn)
+
+    #split points
+    scriptDir = os.path.dirname(os.path.abspath(__file__))
+    metricsDir = os.path.join(scriptDir, '../../../../common-services/AMBARI_METRICS/0.1.0/package')
+    serviceMetricsDir = os.path.join(metricsDir, 'files', 'service-metrics')
+    customServiceMetricsDir = os.path.join(scriptDir, '../../../../dashboards/service-metrics')
+    sys.path.append(os.path.join(metricsDir, 'scripts'))
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    from split_points import FindSplitPointsForAMSRegions
+
+    ams_hbase_site = None
+    ams_hbase_env = None
+
+    # Overridden properties from the UI
+    if "ams-hbase-site" in services["configurations"]:
+      ams_hbase_site = services["configurations"]["ams-hbase-site"]["properties"]
+    if "ams-hbase-env" in services["configurations"]:
+      ams_hbase_env = services["configurations"]["ams-hbase-env"]["properties"]
+
+    # Recommendations
+    if not ams_hbase_site:
+      ams_hbase_site = configurations["ams-hbase-site"]["properties"]
+    if not ams_hbase_env:
+      ams_hbase_env = configurations["ams-hbase-env"]["properties"]
+
+    split_point_finder = FindSplitPointsForAMSRegions(
+      ams_hbase_site, ams_hbase_env, serviceMetricsDir, customServiceMetricsDir, operatingMode, servicesList)
+
+    result = split_point_finder.get_split_points()
+    precision_splits = ' '
+    aggregate_splits = ' '
+    if result.precision:
+      precision_splits = result.precision
+    if result.aggregate:
+      aggregate_splits = result.aggregate
+    putAmsSiteProperty("timeline.metrics.host.aggregate.splitpoints", ','.join(precision_splits))
+    putAmsSiteProperty("timeline.metrics.cluster.aggregate.splitpoints", ','.join(aggregate_splits))
+
+    component_grafana_exists = False
+    for service in services['services']:
+      if 'components' in service:
+        for component in service['components']:
+          if 'StackServiceComponents' in component:
+            # If Grafana is installed, the hostnames list indicates its location
+            if 'METRICS_GRAFANA' in component['StackServiceComponents']['component_name'] and\
+              len(component['StackServiceComponents']['hostnames']) != 0:
+              component_grafana_exists = True
+              break
+    pass
+
+    if not component_grafana_exists:
+      putGrafanaPropertyAttribute("metrics_grafana_password", "visible", "false")
+
+    pass
+
+  def getHostNamesWithComponent(self, serviceName, componentName, services):
+    """
+    Returns the list of hostnames on which service component is installed
+    """
+    if services is not None and serviceName in [service["StackServices"]["service_name"] for service in services["services"]]:
+      service = [serviceEntry for serviceEntry in services["services"] if serviceEntry["StackServices"]["service_name"] == serviceName][0]
+      components = [componentEntry for componentEntry in service["components"] if componentEntry["StackServiceComponents"]["component_name"] == componentName]
+      if (len(components) > 0 and len(components[0]["StackServiceComponents"]["hostnames"]) > 0):
+        componentHostnames = components[0]["StackServiceComponents"]["hostnames"]
+        return componentHostnames
+    return []
+
+  def getHostsWithComponent(self, serviceName, componentName, services, hosts):
+    if services is not None and hosts is not None and serviceName in [service["StackServices"]["service_name"] for service in services["services"]]:
+      service = [serviceEntry for serviceEntry in services["services"] if serviceEntry["StackServices"]["service_name"] == serviceName][0]
+      components = [componentEntry for componentEntry in service["components"] if componentEntry["StackServiceComponents"]["component_name"] == componentName]
+      if (len(components) > 0 and len(components[0]["StackServiceComponents"]["hostnames"]) > 0):
+        componentHostnames = components[0]["StackServiceComponents"]["hostnames"]
+        componentHosts = [host for host in hosts["items"] if host["Hosts"]["host_name"] in componentHostnames]
+        return componentHosts
+    return []
+
+  def getHostWithComponent(self, serviceName, componentName, services, hosts):
+    componentHosts = self.getHostsWithComponent(serviceName, componentName, services, hosts)
+    if (len(componentHosts) > 0):
+      return componentHosts[0]
+    return None
+
+  def getHostComponentsByCategories(self, hostname, categories, services, hosts):
+    components = []
+    if services is not None and hosts is not None:
+      for service in services["services"]:
+          components.extend([componentEntry for componentEntry in service["components"]
+                              if componentEntry["StackServiceComponents"]["component_category"] in categories
+                              and hostname in componentEntry["StackServiceComponents"]["hostnames"]])
+    return components
+
+  def getZKHostPortString(self, services, include_port=True):
+    """
+    Returns a comma-delimited string of the ZooKeeper server hosts installed in the cluster, each with the configured client port
+    Example: zk.host1.org:2181,zk.host2.org:2181,zk.host3.org:2181
+    include_port boolean param -> whether the port should be appended to each host
+    """
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    include_zookeeper = "ZOOKEEPER" in servicesList
+    zookeeper_host_port = ''
+
+    if include_zookeeper:
+      zookeeper_hosts = self.getHostNamesWithComponent("ZOOKEEPER", "ZOOKEEPER_SERVER", services)
+      zookeeper_host_port_arr = []
+
+      if include_port:
+        zookeeper_port = self.getZKPort(services)
+        for i in range(len(zookeeper_hosts)):
+          zookeeper_host_port_arr.append(zookeeper_hosts[i] + ':' + zookeeper_port)
+      else:
+        for i in range(len(zookeeper_hosts)):
+          zookeeper_host_port_arr.append(zookeeper_hosts[i])
+
+      zookeeper_host_port = ",".join(zookeeper_host_port_arr)
+    return zookeeper_host_port
+
+  def getZKPort(self, services):
+    zookeeper_port = '2181'     #default port
+    if 'zoo.cfg' in services['configurations'] and ('clientPort' in services['configurations']['zoo.cfg']['properties']):
+      zookeeper_port = services['configurations']['zoo.cfg']['properties']['clientPort']
+    return zookeeper_port
+
+  def getConfigurationClusterSummary(self, servicesList, hosts, components, services):
+
+    hBaseInstalled = False
+    if 'HBASE' in servicesList:
+      hBaseInstalled = True
+
+    cluster = {
+      "cpu": 0,
+      "disk": 0,
+      "ram": 0,
+      "hBaseInstalled": hBaseInstalled,
+      "components": components
+    }
+
+    if len(hosts["items"]) > 0:
+      nodeManagerHosts = self.getHostsWithComponent("YARN", "NODEMANAGER", services, hosts)
+      # The NodeManager host with the least memory is used as the reference, since values sized for it also work on larger hosts.
+      if nodeManagerHosts is not None and len(nodeManagerHosts) > 0:
+        nodeManagerHost = nodeManagerHosts[0]
+        for nmHost in nodeManagerHosts:
+          if nmHost["Hosts"]["total_mem"] < nodeManagerHost["Hosts"]["total_mem"]:
+            nodeManagerHost = nmHost
+        host = nodeManagerHost["Hosts"]
+        cluster["referenceNodeManagerHost"] = host
+      else:
+        host = hosts["items"][0]["Hosts"]
+      cluster["referenceHost"] = host
+      cluster["cpu"] = host["cpu_count"]
+      cluster["disk"] = len(host["disk_info"])
+      cluster["ram"] = int(host["total_mem"] / (1024 * 1024))
+
+    ramRecommendations = [
+      {"os":1, "hbase":1},
+      {"os":2, "hbase":1},
+      {"os":2, "hbase":2},
+      {"os":4, "hbase":4},
+      {"os":6, "hbase":8},
+      {"os":8, "hbase":8},
+      {"os":8, "hbase":8},
+      {"os":12, "hbase":16},
+      {"os":24, "hbase":24},
+      {"os":32, "hbase":32},
+      {"os":64, "hbase":32}
+    ]
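+    # map cluster["ram"] (GB) to a tier index in ramRecommendations; the boolean keys are mutually exclusive, so indexing with 1 (True) selects it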
+    index = {
+      cluster["ram"] <= 4: 0,
+      4 < cluster["ram"] <= 8: 1,
+      8 < cluster["ram"] <= 16: 2,
+      16 < cluster["ram"] <= 24: 3,
+      24 < cluster["ram"] <= 48: 4,
+      48 < cluster["ram"] <= 64: 5,
+      64 < cluster["ram"] <= 72: 6,
+      72 < cluster["ram"] <= 96: 7,
+      96 < cluster["ram"] <= 128: 8,
+      128 < cluster["ram"] <= 256: 9,
+      256 < cluster["ram"]: 10
+    }[1]
+
+
+    cluster["reservedRam"] = ramRecommendations[index]["os"]
+    cluster["hbaseRam"] = ramRecommendations[index]["hbase"]
+
+
+    cluster["minContainerSize"] = {
+      cluster["ram"] <= 4: 256,
+      4 < cluster["ram"] <= 8: 512,
+      8 < cluster["ram"] <= 24: 1024,
+      24 < cluster["ram"]: 2048
+    }[1]
+
+    totalAvailableRam = cluster["ram"] - cluster["reservedRam"]
+    if cluster["hBaseInstalled"]:
+      totalAvailableRam -= cluster["hbaseRam"]
+    cluster["totalAvailableRam"] = max(512, totalAvailableRam * 1024)
+    '''containers = max(3, min(2*cores, min(1.8*DISKS, (Total available RAM) / MIN_CONTAINER_SIZE)))'''
+    cluster["containers"] = round(max(3,
+                                min(2 * cluster["cpu"],
+                                    min(ceil(1.8 * cluster["disk"]),
+                                            cluster["totalAvailableRam"] / cluster["minContainerSize"]))))
+
+    '''ramPerContainers = max(2GB, RAM - reservedRam - hBaseRam) / containers'''
+    cluster["ramPerContainer"] = abs(cluster["totalAvailableRam"] / cluster["containers"])
+    '''If greater than 1GB, value will be in multiples of 512.'''
+    if cluster["ramPerContainer"] > 1024:
+      cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / 512) * 512
+
+    cluster["mapMemory"] = int(cluster["ramPerContainer"])
+    cluster["reduceMemory"] = cluster["ramPerContainer"]
+    cluster["amMemory"] = max(cluster["mapMemory"], cluster["reduceMemory"])
+
+    return cluster
+
+  def getServiceConfigurationValidators(self):
+    return {
+      "HDFS": { "hdfs-site": self.validateHDFSConfigurations,
+                "hadoop-env": self.validateHDFSConfigurationsEnv},
+      "MAPREDUCE2": {"mapred-site": self.validateMapReduce2Configurations},
+      "YARN": {"yarn-site": self.validateYARNConfigurations,
+               "yarn-env": self.validateYARNEnvConfigurations},
+      "HBASE": {"hbase-env": self.validateHbaseEnvConfigurations},
+      "STORM": {"storm-site": self.validateStormConfigurations},
+      "AMBARI_METRICS": {"ams-hbase-site": self.validateAmsHbaseSiteConfigurations,
+              "ams-hbase-env": self.validateAmsHbaseEnvConfigurations,
+              "ams-site": self.validateAmsSiteConfigurations,
+              "ams-env": self.validateAmsEnvConfigurations}
+    }
+
+  def validateMinMax(self, items, recommendedDefaults, configurations):
+
+    # required for casting to the proper numeric type before comparison
+    def convertToNumber(number):
+      try:
+        return int(number)
+      except ValueError:
+        return float(number)
+
+    for configName in configurations:
+      validationItems = []
+      if configName in recommendedDefaults and "property_attributes" in recommendedDefaults[configName]:
+        for propertyName in recommendedDefaults[configName]["property_attributes"]:
+          if propertyName in configurations[configName]["properties"]:
+            if "maximum" in recommendedDefaults[configName]["property_attributes"][propertyName] and \
+                propertyName in recommendedDefaults[configName]["properties"]:
+              userValue = convertToNumber(configurations[configName]["properties"][propertyName])
+              maxValue = convertToNumber(recommendedDefaults[configName]["property_attributes"][propertyName]["maximum"])
+              if userValue > maxValue:
+                validationItems.extend([{"config-name": propertyName, "item": self.getWarnItem("Value is greater than the recommended maximum of {0} ".format(maxValue))}])
+            if "minimum" in recommendedDefaults[configName]["property_attributes"][propertyName] and \
+                    propertyName in recommendedDefaults[configName]["properties"]:
+              userValue = convertToNumber(configurations[configName]["properties"][propertyName])
+              minValue = convertToNumber(recommendedDefaults[configName]["property_attributes"][propertyName]["minimum"])
+              if userValue < minValue:
+                validationItems.extend([{"config-name": propertyName, "item": self.getWarnItem("Value is less than the recommended minimum of {0} ".format(minValue))}])
+      items.extend(self.toConfigurationValidationProblems(validationItems, configName))
+    pass
+
+  def validateAmsSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+
+    op_mode = properties.get("timeline.metrics.service.operation.mode")
+    correct_op_mode_item = None
+    if op_mode not in ("embedded", "distributed"):
+      correct_op_mode_item = self.getErrorItem("Correct value should be set. Valid values are 'embedded' and 'distributed'.")
+      pass
+
+    validationItems.extend([{"config-name":'timeline.metrics.service.operation.mode', "item": correct_op_mode_item }])
+    return self.toConfigurationValidationProblems(validationItems, "ams-site")
+
+  def validateAmsHbaseSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+
+    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+    ams_site = getSiteProperties(configurations, "ams-site")
+    core_site = getSiteProperties(configurations, "core-site")
+
+    collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
+    recommendedDiskSpace = 10485760
+    # TODO validate configuration for multiple AMBARI_METRICS collectors
+    if len(amsCollectorHosts) > 1:
+      pass
+    else:
+      if total_sinks_count > 2000:
+        recommendedDiskSpace  = 104857600  # * 1k == 100 Gb
+      elif total_sinks_count > 500:
+        recommendedDiskSpace  = 52428800  # * 1k == 50 Gb
+      elif total_sinks_count > 250:
+        recommendedDiskSpace  = 20971520  # * 1k == 20 Gb
+
+    validationItems = []
+
+    rootdir_item = None
+    op_mode = ams_site.get("timeline.metrics.service.operation.mode")
+    default_fs = core_site.get("fs.defaultFS") if core_site else "file:///"
+    hbase_rootdir = properties.get("hbase.rootdir")
+    hbase_tmpdir = properties.get("hbase.tmp.dir")
+    distributed = properties.get("hbase.cluster.distributed")
+    is_local_root_dir = hbase_rootdir.startswith("file://") or (default_fs.startswith("file://") and hbase_rootdir.startswith("/"))
+
+    if op_mode == "distributed" and is_local_root_dir:
+      rootdir_item = self.getWarnItem("In distributed mode hbase.rootdir should point to HDFS.")
+    elif op_mode == "embedded":
+      if distributed.lower() == "false" and hbase_rootdir.startswith('/') or hbase_rootdir.startswith("hdfs://"):
+        rootdir_item = self.getWarnItem("In embedded mode hbase.rootdir should not be a schemeless path or point to HDFS; "
+                                        "use a local filesystem URI, for example file://")
+      pass
+
+    distributed_item = None
+    if op_mode == "distributed" and not distributed.lower() == "true":
+      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to true for "
+                                           "distributed mode")
+    if op_mode == "embedded" and distributed.lower() == "true":
+      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to false for embedded mode")
+
+    hbase_zk_client_port = properties.get("hbase.zookeeper.property.clientPort")
+    zkPort = self.getZKPort(services)
+    hbase_zk_client_port_item = None
+    if distributed.lower() == "true" and op_mode == "distributed" and \
+        hbase_zk_client_port != zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
+      hbase_zk_client_port_item = self.getErrorItem("In AMS distributed mode, hbase.zookeeper.property.clientPort "
+                                                    "should be the cluster zookeeper server port : {0}".format(zkPort))
+
+    if distributed.lower() == "false" and op_mode == "embedded" and \
+        hbase_zk_client_port == zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
+      hbase_zk_client_port_item = self.getErrorItem("In AMS embedded mode, hbase.zookeeper.property.clientPort "
+                                                    "should be a different port than cluster zookeeper port."
+                                                    "(default:61181)")
+
+    validationItems.extend([{"config-name":'hbase.rootdir', "item": rootdir_item },
+                            {"config-name":'hbase.cluster.distributed', "item": distributed_item },
+                            {"config-name":'hbase.zookeeper.property.clientPort', "item": hbase_zk_client_port_item }])
+
+    for collectorHostName in amsCollectorHosts:
+      for host in hosts["items"]:
+        if host["Hosts"]["host_name"] == collectorHostName:
+          if op_mode == 'embedded' or is_local_root_dir:
+            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorEnoughDiskSpace(properties, 'hbase.rootdir', host["Hosts"], recommendedDiskSpace)}])
+            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.rootdir', host["Hosts"])}])
+            validationItems.extend([{"config-name": 'hbase.tmp.dir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.tmp.dir', host["Hosts"])}])
+
+          dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
+          if is_local_root_dir:
+            mountPoints = []
+            for mountPoint in host["Hosts"]["disk_info"]:
+              mountPoints.append(mountPoint["mountpoint"])
+            hbase_rootdir_mountpoint = getMountPointForDir(hbase_rootdir, mountPoints)
+            hbase_tmpdir_mountpoint = getMountPointForDir(hbase_tmpdir, mountPoints)
+            preferred_mountpoints = self.getPreferredMountPoints(host['Hosts'])
+            # hbase.rootdir and hbase.tmp.dir shouldn't point to the same partition
+            # if multiple preferred_mountpoints exist
+            if hbase_rootdir_mountpoint == hbase_tmpdir_mountpoint and \
+              len(preferred_mountpoints) > 1:
+              item = self.getWarnItem("Consider not using {0} partition for storing metrics temporary data. "
+                                      "{0} partition is already used as hbase.rootdir to store metrics data".format(hbase_tmpdir_mountpoint))
+              validationItems.extend([{"config-name":'hbase.tmp.dir', "item": item}])
+
+            # if METRICS_COLLECTOR is co-hosted with DATANODE
+            # cross-check dfs.datanode.data.dir and hbase.rootdir
+            # they shouldn't share same disk partition IO
+            hdfs_site = getSiteProperties(configurations, "hdfs-site")
+            dfs_datadirs = hdfs_site.get("dfs.datanode.data.dir").split(",") if hdfs_site and "dfs.datanode.data.dir" in hdfs_site else []
+            if dn_hosts and collectorHostName in dn_hosts and ams_site and \
+              dfs_datadirs and len(preferred_mountpoints) > len(dfs_datadirs):
+              for dfs_datadir in dfs_datadirs:
+                dfs_datadir_mountpoint = getMountPointForDir(dfs_datadir, mountPoints)
+                if dfs_datadir_mountpoint == hbase_rootdir_mountpoint:
+                  item = self.getWarnItem("Consider not using {0} partition for storing metrics data. "
+                                          "{0} is already used by datanode to store HDFS data".format(hbase_rootdir_mountpoint))
+                  validationItems.extend([{"config-name": 'hbase.rootdir', "item": item}])
+                  break
+          # If no local DN in distributed mode
+          elif collectorHostName not in dn_hosts and distributed.lower() == "true":
+            item = self.getWarnItem("It's recommended to install Datanode component on {0} "
+                                    "to speed up IO operations between HDFS and Metrics "
+                                    "Collector in distributed mode ".format(collectorHostName))
+            validationItems.extend([{"config-name": "hbase.cluster.distributed", "item": item}])
+          # Short-circuit read should be enabled in distributed mode
+          # if a local DN is installed
+          else:
+            validationItems.extend([{"config-name": "dfs.client.read.shortcircuit", "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "dfs.client.read.shortcircuit")}])
+
+    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-site")
+
+  def validateStormConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Storm AMS integration
+    if 'AMBARI_METRICS' in servicesList and "metrics.reporter.register" in properties and \
+      "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter" not in properties.get("metrics.reporter.register"):
+
+      validationItems.append({"config-name": 'metrics.reporter.register',
+                              "item": self.getWarnItem(
+                                "Should be set to org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter to report the metrics to Ambari Metrics service.")})
+
+    return self.toConfigurationValidationProblems(validationItems, "storm-site")
+
+  def validateAmsHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+
+    ams_env = getSiteProperties(configurations, "ams-env")
+    amsHbaseSite = getSiteProperties(configurations, "ams-hbase-site")
+    validationItems = []
+    mb = 1024 * 1024
+    gb = 1024 * mb
+
+    regionServerItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_regionserver_heapsize") ## FIXME if new service added
+    if regionServerItem:
+      validationItems.extend([{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem}])
+
+    hbaseMasterHeapsizeItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_master_heapsize")
+    if hbaseMasterHeapsizeItem:
+      validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
+
+    logDirItem = self.validatorEqualsPropertyItem(properties, "hbase_log_dir", ams_env, "metrics_collector_log_dir")
+    if logDirItem:
+      validationItems.extend([{"config-name": "hbase_log_dir", "item": logDirItem}])
+
+    hbase_master_heapsize = to_number(properties["hbase_master_heapsize"])
+    hbase_master_xmn_size = to_number(properties["hbase_master_xmn_size"])
+    hbase_regionserver_heapsize = to_number(properties["hbase_regionserver_heapsize"])
+    hbase_regionserver_xmn_size = to_number(properties["regionserver_xmn_size"])
+
+    # Validate Xmn settings.
+    masterXmnItem = None
+    regionServerXmnItem = None
+    is_hbase_distributed = amsHbaseSite.get("hbase.cluster.distributed").lower() == 'true'
+
+    if is_hbase_distributed:
+      minMasterXmn = 0.12 * hbase_master_heapsize
+      maxMasterXmn = 0.2 * hbase_master_heapsize
+      if hbase_master_xmn_size < minMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
+                                         "(12% of hbase_master_heapsize)".format(int(ceil(minMasterXmn))))
+
+      if hbase_master_xmn_size > maxMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
+                                         "(20% of hbase_master_heapsize)".format(int(floor(maxMasterXmn))))
+
+      minRegionServerXmn = 0.12 * hbase_regionserver_heapsize
+      maxRegionServerXmn = 0.2 * hbase_regionserver_heapsize
+      if hbase_regionserver_xmn_size < minRegionServerXmn:
+        regionServerXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
+                                               "(12% of hbase_regionserver_heapsize)"
+                                               .format(int(ceil(minRegionServerXmn))))
+
+      if hbase_regionserver_xmn_size > maxRegionServerXmn:
+        regionServerXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
+                                               "(20% of hbase_regionserver_heapsize)"
+                                               .format(int(floor(maxRegionServerXmn))))
+    else:
+      minMasterXmn = 0.12 * (hbase_master_heapsize + hbase_regionserver_heapsize)
+      maxMasterXmn = 0.2 *  (hbase_master_heapsize + hbase_regionserver_heapsize)
+      if hbase_master_xmn_size < minMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
+                                         "(12% of hbase_master_heapsize + hbase_regionserver_heapsize)"
+                                         .format(int(ceil(minMasterXmn))))
+
+      if hbase_master_xmn_size > maxMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
+                                         "(20% of hbase_master_heapsize + hbase_regionserver_heapsize)"
+                                         .format(int(floor(maxMasterXmn))))
+    if masterXmnItem:
+      validationItems.extend([{"config-name": "hbase_master_xmn_size", "item": masterXmnItem}])
+
+    if regionServerXmnItem:
+      validationItems.extend([{"config-name": "regionserver_xmn_size", "item": regionServerXmnItem}])
+
+    if hbaseMasterHeapsizeItem is None:
+      hostMasterComponents = {}
+
+      for service in services["services"]:
+        for component in service["components"]:
+          if component["StackServiceComponents"]["hostnames"] is not None:
+            for hostName in component["StackServiceComponents"]["hostnames"]:
+              if self.isMasterComponent(component):
+                if hostName not in hostMasterComponents.keys():
+                  hostMasterComponents[hostName] = []
+                hostMasterComponents[hostName].append(component["StackServiceComponents"]["component_name"])
+
+      amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+      for collectorHostName in amsCollectorHosts:
+        for host in hosts["items"]:
+          if host["Hosts"]["host_name"] == collectorHostName:
+            # AMS Collector co-hosted with other master components in bigger clusters
+            if len(hosts['items']) > 31 and \
+                            len(hostMasterComponents[collectorHostName]) > 2 and \
+                            host["Hosts"]["total_mem"] < 32*mb: # < 32Gb(total_mem in k)
+              masterHostMessage = "Host {0} is used by multiple master components ({1}). " \
+                                  "It is recommended to use a separate host for the " \
+                                  "Ambari Metrics Collector component and ensure " \
+                                  "the host has sufficient memory available."
+
+              hbaseMasterHeapsizeItem = self.getWarnItem(masterHostMessage.format(
+                  collectorHostName, str(", ".join(hostMasterComponents[collectorHostName]))))
+              if hbaseMasterHeapsizeItem:
+                validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
+
+            # Check for unused RAM on AMS Collector node
+            hostComponents = []
+            for service in services["services"]:
+              for component in service["components"]:
+                if component["StackServiceComponents"]["hostnames"] is not None:
+                  if collectorHostName in component["StackServiceComponents"]["hostnames"]:
+                    hostComponents.append(component["StackServiceComponents"]["component_name"])
+
+            requiredMemory = getMemorySizeRequired(hostComponents, configurations)
+            unusedMemory = host["Hosts"]["total_mem"] * 1024 - requiredMemory # in bytes
+
+            heapPropertyToIncrease = "hbase_regionserver_heapsize" if is_hbase_distributed else "hbase_master_heapsize"
+            xmnPropertyToIncrease = "regionserver_xmn_size" if is_hbase_distributed else "hbase_master_xmn_size"
+            hbase_needs_increase = to_number(properties[heapPropertyToIncrease]) * mb < 32 * gb
+
+            if unusedMemory > 4*gb and hbase_needs_increase:  # warn user, if more than 4GB RAM is unused
+
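+              # offer ~80% of the unused RAM beyond a 4 GB buffer to the AMS HBase heap, capped at 32 GB and rounded to a 128 MB multiple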
+              recommended_hbase_heapsize = int((unusedMemory - 4*gb)*4/5) + to_number(properties.get(heapPropertyToIncrease))*mb
+              recommended_hbase_heapsize = min(32*gb, recommended_hbase_heapsize) #Make sure heapsize <= 32GB
+              recommended_hbase_heapsize = round_to_n(recommended_hbase_heapsize/mb,128) # Round to 128m multiple
+              if to_number(properties[heapPropertyToIncrease]) < recommended_hbase_heapsize:
+                hbaseHeapsizeItem = self.getWarnItem("Consider allocating {0} MB to {1} in ams-hbase-env to use up some "
+                                                     "unused memory on host"
+                                                         .format(recommended_hbase_heapsize,
+                                                                 heapPropertyToIncrease))
+                validationItems.extend([{"config-name": heapPropertyToIncrease, "item": hbaseHeapsizeItem}])
+
+              recommended_xmn_size = round_to_n(0.15*recommended_hbase_heapsize,128)
+              if to_number(properties[xmnPropertyToIncrease]) < recommended_xmn_size:
+                xmnPropertyToIncreaseItem = self.getWarnItem("Consider allocating {0} MB to use up some unused memory "
+                                                             "on host".format(recommended_xmn_size))
+                validationItems.extend([{"config-name": xmnPropertyToIncrease, "item": xmnPropertyToIncreaseItem}])
+      pass
+
+    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-env")
+
+  def validateAmsEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+
+    ams_env = getSiteProperties(configurations, "ams-env")
+    mb = 1024 * 1024
+    gb = 1024 * mb
+    validationItems = []
+    collector_heapsize = to_number(ams_env.get("metrics_collector_heapsize"))
+    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+    for collectorHostName in amsCollectorHosts:
+      for host in hosts["items"]:
+        if host["Hosts"]["host_name"] == collectorHostName:
+          hostComponents = []
+          for service in services["services"]:
+            for component in service["components"]:
+              if component["StackServiceComponents"]["hostnames"] is not None:
+                if collectorHostName in component["StackServiceComponents"]["hostnames"]:
+                  hostComponents.append(component["StackServiceComponents"]["component_name"])
+
+          requiredMemory = getMemorySizeRequired(hostComponents, configurations)
+          unusedMemory = host["Hosts"]["total_mem"] * 1024 - requiredMemory # in bytes
+          collector_needs_increase = collector_heapsize * mb < 16 * gb
+
+          if unusedMemory > 4*gb and collector_needs_increase:  # warn user, if more than 4GB RAM is unused
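+            # add ~20% of the unused RAM beyond a 4 GB buffer to the collector heap, rounded to a 128 MB multiple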
+            recommended_collector_heapsize = int((unusedMemory - 4*gb)/5) + collector_heapsize * mb
+            recommended_collector_heapsize = round_to_n(recommended_collector_heapsize/mb,128) # Round to 128m multiple
+            if collector_heapsize < recommended_collector_heapsize:
+              validation_msg = "Consider allocating {0} MB to metrics_collector_heapsize in ams-env to use up some " \
+                               "unused memory on host"
+              collectorHeapsizeItem = self.getWarnItem(validation_msg.format(recommended_collector_heapsize))
+              validationItems.extend([{"config-name": "metrics_collector_heapsize", "item": collectorHeapsizeItem}])
+    pass
+    return self.toConfigurationValidationProblems(validationItems, "ams-env")
+
+  def getPreferredMountPoints(self, hostInfo):
+
+    # '/etc/resolv.conf', '/etc/hostname', '/etc/hosts' are docker specific mount points
+    undesirableMountPoints = ["/", "/home", "/etc/resolv.conf", "/etc/hosts",
+                              "/etc/hostname", "/tmp"]
+    undesirableFsTypes = ["devtmpfs", "tmpfs", "vboxsf", "CDFS"]
+    mountPoints = []
+    if hostInfo and "disk_info" in hostInfo:
+      mountPointsDict = {}
+      for mountpoint in hostInfo["disk_info"]:
+        if not (mountpoint["mountpoint"] in undesirableMountPoints or
+                mountpoint["mountpoint"].startswith(("/boot", "/mnt")) or
+                mountpoint["type"] in undesirableFsTypes or
+                mountpoint["available"] == str(0)):
+          mountPointsDict[mountpoint["mountpoint"]] = to_number(mountpoint["available"])
+      if mountPointsDict:
+        mountPoints = sorted(mountPointsDict, key=mountPointsDict.get, reverse=True)
+    mountPoints.append("/")
+    return mountPoints
+
+  def validatorNotRootFs(self, properties, recommendedDefaults, propertyName, hostInfo):
+    if not propertyName in properties:
+      return self.getErrorItem("Value should be set")
+    dir = properties[propertyName]
+    if not dir.startswith("file://") or dir == recommendedDefaults.get(propertyName):
+      return None
+
+    dir = re.sub("^file://", "", dir, count=1)
+    mountPoints = []
+    for mountPoint in hostInfo["disk_info"]:
+      mountPoints.append(mountPoint["mountpoint"])
+    mountPoint = getMountPointForDir(dir, mountPoints)
+
+    if "/" == mountPoint and self.getPreferredMountPoints(hostInfo)[0] != mountPoint:
+      return self.getWarnItem("It is not recommended to use root partition for {0}".format(propertyName))
+
+    return None
+
+  def validatorEnoughDiskSpace(self, properties, propertyName, hostInfo, reqiuredDiskSpace):
+    if not propertyName in properties:
+      return self.getErrorItem("Value should be set")
+    dir = properties[propertyName]
+    if not dir.startswith("file://"):
+      return None
+
+    dir = re.sub("^file://", "", dir, count=1)
+    mountPoints = {}
+    for mountPoint in hostInfo["disk_info"]:
+      mountPoints[mountPoint["mountpoint"]] = to_number(mountPoint["available"])
+    mountPoint = getMountPointForDir(dir, mountPoints.keys())
+
+    if not mountPoints:
+      return self.getErrorItem("No disk info found on host %s" % hostInfo["host_name"])
+
+    if mountPoints[mountPoint] < reqiuredDiskSpace:
+      msg = "Ambari Metrics disk space requirements not met. \n" \
+            "Recommended disk space for partition {0} is {1}G"
+      return self.getWarnItem(msg.format(mountPoint, reqiuredDiskSpace/1048576)) # in Gb
+    return None
+
+  def validatorLessThenDefaultValue(self, properties, recommendedDefaults, propertyName):
+    if propertyName not in recommendedDefaults:
+      # If a property name exists in, say, both hbase-env and hbase-site (which is allowed), then it will exist in the
+      # "properties" dictionary, but not necessarily in the "recommendedDefaults" dictionary. In this case, ignore it.
+      return None
+
+    if not propertyName in properties:
+      return self.getErrorItem("Value should be set")
+    value = to_number(properties[propertyName])
+    if value is None:
+      return self.getErrorItem("Value should be integer")
+    defaultValue = to_number(recommendedDefaults[propertyName])
+    if defaultValue is None:
+      return None
+    if value < defaultValue:
+      return self.getWarnItem("Value is less than the recommended default of {0}".format(defaultValue))
+    return None
+
+  def validatorEqualsPropertyItem(self, properties1, propertyName1,
+                                  properties2, propertyName2,
+                                  emptyAllowed=False):
+    if not propertyName1 in properties1:
+      return self.getErrorItem("Value should be set for %s" % propertyName1)
+    if not propertyName2 in properties2:
+      return self.getErrorItem("Value should be set for %s" % propertyName2)
+    value1 = properties1.get(propertyName1)
+    if value1 is None and not emptyAllowed:
+      return self.getErrorItem("Empty value for %s" % propertyName1)
+    value2 = properties2.get(propertyName2)
+    if value2 is None and not emptyAllowed:
+      return self.getErrorItem("Empty value for %s" % propertyName2)
+    if value1 != value2:
+      return self.getWarnItem("It is recommended to set equal values "
+             "for properties {0} and {1}".format(propertyName1, propertyName2))
+
+    return None
+
+  def validatorEqualsToRecommendedItem(self, properties, recommendedDefaults,
+                                       propertyName):
+    if not propertyName in properties:
+      return self.getErrorItem("Value should be set for %s" % propertyName)
+    value = properties.get(propertyName)
+    if not propertyName in recommendedDefaults:
+      return self.getErrorItem("Value should be recommended for %s" % propertyName)
+    recommendedValue = recommendedDefaults.get(propertyName)
+    if value != recommendedValue:
+      return self.getWarnItem("It is recommended to set value {0} "
+             "for property {1}".format(recommendedValue, propertyName))
+    return None
+
+  def validateMinMemorySetting(self, properties, defaultValue, propertyName):
+    if not propertyName in properties:
+      return self.getErrorItem("Value should be set")
+    if defaultValue is None:
+      return self.getErrorItem("Config's default value can't be null or undefined")
+
+    value = properties[propertyName]
+    if value is None:
+      return self.getErrorItem("Value can't be null or undefined")
+    try:
+      valueInt = to_number(value)
+      # TODO: generify for other use cases
+      defaultValueInt = int(str(defaultValue).strip())
+      if valueInt < defaultValueInt:
+        return self.getWarnItem("Value is less than the minimum recommended default of -Xmx" + str(defaultValue))
+    except:
+      return None
+
+    return None
+
+  def validatorYarnQueue(self, properties, recommendedDefaults, propertyName, services):
+    if propertyName not in properties:
+      return self.getErrorItem("Value should be set")
+
+    capacity_scheduler_properties, _ = self.getCapacitySchedulerProperties(services)
+    leaf_queue_names = self.getAllYarnLeafQueues(capacity_scheduler_properties)
+    queue_name = properties[propertyName]
+
+    if len(leaf_queue_names) == 0:
+      return None
+    elif queue_name not in leaf_queue_names:
+      return self.getErrorItem("Queue is not exist or not corresponds to existing YARN leaf queue")
+
+    return None
+
+  def recommendYarnQueue(self, services, catalog_name=None, queue_property=None):
+    old_queue_name = None
+
+    if services and 'configurations' in services:
+        configurations = services["configurations"]
+        if catalog_name in configurations and queue_property in configurations[catalog_name]["properties"]:
+          old_queue_name = configurations[catalog_name]["properties"][queue_property]
+
+        capacity_scheduler_properties, _ = self.getCapacitySchedulerProperties(services)
+        leaf_queues = sorted(self.getAllYarnLeafQueues(capacity_scheduler_properties))
+
+        if leaf_queues and (old_queue_name is None or old_queue_name not in leaf_queues):
+          if "default" in leaf_queues:
+            return "default"
+          return leaf_queues.pop()
+        elif old_queue_name and old_queue_name in leaf_queues:
+          return None
+
+    return "default"
+
+  def validateXmxValue(self, properties, recommendedDefaults, propertyName):
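+    # Compares the -Xmx size embedded in the property against the recommended default, warning when smaller.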
+    if propertyName not in properties:
+      return self.getErrorItem("Value should be set for %s" % propertyName)
+    value = properties[propertyName]
+    defaultValue = recommendedDefaults[propertyName]
+    if defaultValue is None:
+      return self.getErrorItem("Config's default value can't be null or undefined")
+    if not checkXmxValueFormat(value) and checkXmxValueFormat(defaultValue):
+      # Xmx is in the default-value but not the value, should be an error
+      return self.getErrorItem('Invalid value format: %s should contain an -Xmx setting' % propertyName)
+    if not checkXmxValueFormat(defaultValue):
+      # if default value does not contain Xmx, then there is no point in validating existing value
+      return None
+    valueInt = formatXmxSizeToBytes(getXmxSize(value))
+    defaultValueXmx = getXmxSize(defaultValue)
+    defaultValueInt = formatXmxSizeToBytes(defaultValueXmx)
+    if valueInt < defaultValueInt:
+      return self.getWarnItem("Value is less than the recommended default of -Xmx" + defaultValueXmx)
+    return None
+
+  def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
+                        {"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
+                        {"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
+                        {"config-name": 'mapreduce.map.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},
+                        {"config-name": 'mapreduce.reduce.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},
+                        {"config-name": 'yarn.app.mapreduce.am.resource.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},
+                        {"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')},
+                        {"config-name": 'mapreduce.job.queuename', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'mapreduce.job.queuename', services)} ]
+    return self.toConfigurationValidationProblems(validationItems, "mapred-site")
+
+  def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    clusterEnv = getSiteProperties(configurations, "cluster-env")
+    validationItems = [ {"config-name": 'yarn.nodemanager.resource.memory-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.nodemanager.resource.memory-mb')},
+                        {"config-name": 'yarn.scheduler.minimum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.minimum-allocation-mb')},
+                        {"config-name": 'yarn.nodemanager.linux-container-executor.group', "item": self.validatorEqualsPropertyItem(properties, "yarn.nodemanager.linux-container-executor.group", clusterEnv, "user_group")},
+                        {"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
+    return self.toConfigurationValidationProblems(validationItems, "yarn-site")
+
+  def validateYARNEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [{"config-name": 'service_check.queue.name', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'service_check.queue.name', services)} ]
+    return self.toConfigurationValidationProblems(validationItems, "yarn-env")
+
+  def validateHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    hbase_site = getSiteProperties(configurations, "hbase-site")
+    validationItems = [ {"config-name": 'hbase_regionserver_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_regionserver_heapsize')},
+                        {"config-name": 'hbase_master_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_master_heapsize')},
+                        {"config-name": "hbase_user", "item": self.validatorEqualsPropertyItem(properties, "hbase_user", hbase_site, "hbase.superuser")} ]
+    return self.toConfigurationValidationProblems(validationItems, "hbase-env")
+
+  def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    clusterEnv = getSiteProperties(configurations, "cluster-env")
+    validationItems = [{"config-name": 'dfs.datanode.du.reserved', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'dfs.datanode.du.reserved')},
+                       {"config-name": 

<TRUNCATED>
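
For orientation while reading the truncated validator code above: every validator follows the same contract, returning None when a property passes, or an error/warn item describing the problem, which the validate*Configurations methods then attach to a config-name. The sketch below is a hypothetical, self-contained restatement of that contract; the item dict shape and the standalone function names are assumptions for illustration, not the Ambari implementation itself.

    # Hypothetical sketch of the validator contract used by the stack advisor above.
    def get_error_item(message):
        return {"level": "ERROR", "message": message}  # assumed item shape

    def get_warn_item(message):
        return {"level": "WARN", "message": message}   # assumed item shape

    def validator_equals_property_item(props1, name1, props2, name2, empty_allowed=False):
        # Mirrors validatorEqualsPropertyItem: both properties must exist,
        # be non-empty (unless allowed), and hold equal values.
        if name1 not in props1:
            return get_error_item("Value should be set for %s" % name1)
        if name2 not in props2:
            return get_error_item("Value should be set for %s" % name2)
        value1, value2 = props1.get(name1), props2.get(name2)
        if (value1 is None or value2 is None) and not empty_allowed:
            return get_error_item("Empty value")
        if value1 != value2:
            return get_warn_item("It is recommended to set equal values "
                                 "for properties %s and %s" % (name1, name2))
        return None

    # Example: hbase_user should match hbase.superuser (values are illustrative).
    print(validator_equals_property_item(
        {"hbase_user": "hbase"}, "hbase_user",
        {"hbase.superuser": "hbase"}, "hbase.superuser"))  # -> None, no problem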

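The [47/51] post below adds a metrics.json catalog for the BigInsights 4.0 AMBARI_METRICS service. Each key is the metric path exposed through the Ambari REST API, and each value names the underlying timeline metric plus two capability flags: pointInTime (the current value can be fetched) and temporal (a time range can be queried). A hedged sketch of walking one such entry follows; the entry is copied from the JSON below, while the parsing code itself is illustrative, not Ambari's.

    import json

    # One entry copied from the catalog below; the surrounding code is illustrative.
    catalog = json.loads("""
    {
      "metrics/hbase/ipc/ProcessCallTime_mean": {
        "metric": "ipc.IPC.ProcessCallTime_mean",
        "pointInTime": true,
        "temporal": true
      }
    }
    """)
    for api_path, spec in catalog.items():
        # api_path is what a client asks Ambari for; spec["metric"] is the name
        # the service actually emits through its metrics sink.
        print("%s -> %s (pointInTime=%s, temporal=%s)"
              % (api_path, spec["metric"], spec["pointInTime"], spec["temporal"]))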
[47/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/metrics.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/metrics.json
new file mode 100755
index 0000000..c12e09a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/metrics.json
@@ -0,0 +1,2472 @@
+{
+  "METRICS_COLLECTOR": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/hbase/ipc/ProcessCallTime_75th_percentile": {
+              "metric": "ipc.IPC.ProcessCallTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_95th_percentile": {
+              "metric": "ipc.IPC.ProcessCallTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_99th_percentile": {
+              "metric": "ipc.IPC.ProcessCallTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_max": {
+              "metric": "ipc.IPC.ProcessCallTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_mean": {
+              "metric": "ipc.IPC.ProcessCallTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_median": {
+              "metric": "ipc.IPC.ProcessCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_min": {
+              "metric": "ipc.IPC.ProcessCallTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_num_ops": {
+              "metric": "ipc.IPC.ProcessCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_75th_percentile": {
+              "metric": "ipc.IPC.QueueCallTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_95th_percentile": {
+              "metric": "ipc.IPC.QueueCallTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_99th_percentile": {
+              "metric": "ipc.IPC.QueueCallTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_max": {
+              "metric": "ipc.IPC.QueueCallTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_mean": {
+              "metric": "ipc.IPC.QueueCallTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_median": {
+              "metric": "ipc.IPC.QueueCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_min": {
+              "metric": "ipc.IPC.QueueCallTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_num_ops": {
+              "metric": "ipc.IPC.QueueCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/authenticationFailures": {
+              "metric": "ipc.IPC.authenticationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/authenticationSuccesses": {
+              "metric": "ipc.IPC.authenticationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/authorizationFailures": {
+              "metric": "ipc.IPC.authorizationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/authorizationSuccesses": {
+              "metric": "ipc.IPC.authorizationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/numActiveHandler": {
+              "metric": "ipc.IPC.numActiveHandler",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/numCallsInGeneralQueue": {
+              "metric": "ipc.IPC.numCallsInGeneralQueue",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/numCallsInPriorityQueue": {
+              "metric": "ipc.IPC.numCallsInPriorityQueue",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/numCallsInReplicationQueue": {
+              "metric": "ipc.IPC.numCallsInReplicationQueue",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/numOpenConnections": {
+              "metric": "ipc.IPC.numOpenConnections",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/queueSize": {
+              "metric": "ipc.IPC.queueSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/receivedBytes": {
+              "metric": "ipc.IPC.receivedBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/sentBytes": {
+              "metric": "ipc.IPC.sentBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/GcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/GcCountConcurrentMarkSweep": {
+              "metric": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/GcCountParNew": {
+              "metric": "jvm.JvmMetrics.GcCountParNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/GcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/GcTimeMillisConcurrentMarkSweep": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/GcTimeMillisParNew": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisParNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/LogError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/LogFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/LogInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/LogWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/ThreadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/ThreadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/ThreadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/ThreadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/ThreadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/ThreadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_75th_percentile": {
+              "metric": "master.AssignmentManger.Assign_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_95th_percentile": {
+              "metric": "master.AssignmentManger.Assign_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_99th_percentile": {
+              "metric": "master.AssignmentManger.Assign_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_max": {
+              "metric": "master.AssignmentManger.Assign_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_mean": {
+              "metric": "master.AssignmentManger.Assign_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_median": {
+              "metric": "master.AssignmentManger.Assign_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_min": {
+              "metric": "master.AssignmentManger.Assign_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_num_ops": {
+              "metric": "master.AssignmentManger.Assign_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_75th_percentile": {
+              "metric": "master.AssignmentManger.BulkAssign_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_95th_percentile": {
+              "metric": "master.AssignmentManger.BulkAssign_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_99th_percentile": {
+              "metric": "master.AssignmentManger.BulkAssign_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_max": {
+              "metric": "master.AssignmentManger.BulkAssign_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_mean": {
+              "metric": "master.AssignmentManger.BulkAssign_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_median": {
+              "metric": "master.AssignmentManger.BulkAssign_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_min": {
+              "metric": "master.AssignmentManger.BulkAssign_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_num_ops": {
+              "metric": "master.AssignmentManger.BulkAssign_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/ritCount": {
+              "metric": "master.AssignmentManger.ritCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/ritCountOverThreshold": {
+              "metric": "master.AssignmentManger.ritCountOverThreshold",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/ritOldestAge": {
+              "metric": "master.AssignmentManger.ritOldestAge",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_75th_percentile": {
+              "metric": "master.Balancer.BalancerCluster_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_95th_percentile": {
+              "metric": "master.Balancer.BalancerCluster_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_99th_percentile": {
+              "metric": "master.Balancer.BalancerCluster_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_max": {
+              "metric": "master.Balancer.BalancerCluster_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_mean": {
+              "metric": "master.Balancer.BalancerCluster_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_median": {
+              "metric": "master.Balancer.BalancerCluster_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_min": {
+              "metric": "master.Balancer.BalancerCluster_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_num_ops": {
+              "metric": "master.Balancer.BalancerCluster_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/miscInvocationCount": {
+              "metric": "master.Balancer.miscInvocationCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_75th_percentile": {
+              "metric": "master.FileSystem.HlogSplitSize_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_95th_percentile": {
+              "metric": "master.FileSystem.HlogSplitSize_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_99th_percentile": {
+              "metric": "master.FileSystem.HlogSplitSize_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_max": {
+              "metric": "master.FileSystem.HlogSplitSize_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_mean": {
+              "metric": "master.FileSystem.HlogSplitSize_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_median": {
+              "metric": "master.FileSystem.HlogSplitSize_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_min": {
+              "metric": "master.FileSystem.HlogSplitSize_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_num_ops": {
+              "metric": "master.FileSystem.HlogSplitSize_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_75th_percentile": {
+              "metric": "master.FileSystem.HlogSplitTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_95th_percentile": {
+              "metric": "master.FileSystem.HlogSplitTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_99th_percentile": {
+              "metric": "master.FileSystem.HlogSplitTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_max": {
+              "metric": "master.FileSystem.HlogSplitTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_mean": {
+              "metric": "master.FileSystem.HlogSplitTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_median": {
+              "metric": "master.FileSystem.HlogSplitTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_min": {
+              "metric": "master.FileSystem.HlogSplitTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_num_ops": {
+              "metric": "master.FileSystem.HlogSplitTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_75th_percentile": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_95th_percentile": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_99th_percentile": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_max": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_mean": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_median": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_min": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_num_ops": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_75th_percentile": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_95th_percentile": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_99th_percentile": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_max": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_mean": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_median": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_min": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_num_ops": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/AverageLoad": {
+              "metric": "master.Server.averageLoad",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/clusterRequests": {
+              "metric": "master.Server.clusterRequests",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/masterActiveTime": {
+              "metric": "master.Server.masterActiveTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/masterStartTime": {
+              "metric": "master.Server.masterStartTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/numDeadRegionServers": {
+              "metric": "master.Server.numDeadRegionServers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/numRegionServers": {
+              "metric": "master.Server.numRegionServers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_75th_percentile": {
+              "metric": "regionserver.Server.Append_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_95th_percentile": {
+              "metric": "regionserver.Server.Append_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_99th_percentile": {
+              "metric": "regionserver.Server.Append_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_max": {
+              "metric": "regionserver.Server.Append_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_mean": {
+              "metric": "regionserver.Server.Append_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_median": {
+              "metric": "regionserver.Server.Append_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_min": {
+              "metric": "regionserver.Server.Append_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_num_ops": {
+              "metric": "regionserver.Server.Append_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_75th_percentile": {
+              "metric": "regionserver.Server.Delete_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_95th_percentile": {
+              "metric": "regionserver.Server.Delete_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_99th_percentile": {
+              "metric": "regionserver.Server.Delete_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_max": {
+              "metric": "regionserver.Server.Delete_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_mean": {
+              "metric": "regionserver.Server.Delete_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_median": {
+              "metric": "regionserver.Server.Delete_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_min": {
+              "metric": "regionserver.Server.Delete_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_num_ops": {
+              "metric": "regionserver.Server.Delete_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_75th_percentile": {
+              "metric": "regionserver.Server.Get_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_95th_percentile": {
+              "metric": "regionserver.Server.Get_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_99th_percentile": {
+              "metric": "regionserver.Server.Get_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_max": {
+              "metric": "regionserver.Server.Get_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_mean": {
+              "metric": "regionserver.Server.Get_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_median": {
+              "metric": "regionserver.Server.Get_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_min": {
+              "metric": "regionserver.Server.Get_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_num_ops": {
+              "metric": "regionserver.Server.Get_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_75th_percentile": {
+              "metric": "regionserver.Server.Increment_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_95th_percentile": {
+              "metric": "regionserver.Server.Increment_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_99th_percentile": {
+              "metric": "regionserver.Server.Increment_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_max": {
+              "metric": "regionserver.Server.Increment_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_mean": {
+              "metric": "regionserver.Server.Increment_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_median": {
+              "metric": "regionserver.Server.Increment_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_min": {
+              "metric": "regionserver.Server.Increment_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_num_ops": {
+              "metric": "regionserver.Server.Increment_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_75th_percentile": {
+              "metric": "regionserver.Server.Mutate_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_95th_percentile": {
+              "metric": "regionserver.Server.Mutate_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_99th_percentile": {
+              "metric": "regionserver.Server.Mutate_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_max": {
+              "metric": "regionserver.Server.Mutate_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_mean": {
+              "metric": "regionserver.Server.Mutate_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_median": {
+              "metric": "regionserver.Server.Mutate_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_min": {
+              "metric": "regionserver.Server.Mutate_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_num_ops": {
+              "metric": "regionserver.Server.Mutate_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Replay_75th_percentile": {
+              "metric": "regionserver.Server.Replay_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Replay_95th_percentile": {
+              "metric": "regionserver.Server.Replay_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Replay_99th_percentile": {
+              "metric": "regionserver.Server.Replay_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Replay_max": {
+              "metric": "regionserver.Server.Replay_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Replay_mean": {
+              "metric": "regionserver.Server.Replay_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Replay_median": {
+              "metric": "regionserver.Server.Replay_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Replay_min": {
+              "metric": "regionserver.Server.Replay_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Replay_num_ops": {
+              "metric": "regionserver.Server.Replay_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "regionserver.Server.blockCacheCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheEvictionCount": {
+              "metric": "regionserver.Server.blockCacheEvictionCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheExpressHitPercent": {
+              "metric": "regionserver.Server.blockCacheExpressHitPercent",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheFreeSize": {
+              "metric": "regionserver.Server.blockCacheFreeSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitPercent": {
+              "metric": "regionserver.Server.blockCacheCountHitPercent",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitCount": {
+              "metric": "regionserver.Server.blockCacheHitCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "regionserver.Server.blockCacheMissCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "regionserver.Server.blockCacheSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/checkMutateFailedCount": {
+              "metric": "regionserver.Server.checkMutateFailedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/checkMutatePassedCount": {
+              "metric": "regionserver.Server.checkMutatePassedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/compactionQueueSize": {
+              "metric": "regionserver.Server.compactionQueueLength",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushQueueLength": {
+              "metric": "regionserver.Server.flushQueueLength",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/hlogFileCount": {
+              "metric": "regionserver.Server.hlogFileCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/hlogFileSize": {
+              "metric": "regionserver.Server.hlogFileSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/memStoreSize": {
+              "metric": "regionserver.Server.memStoreSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+              "metric": "regionserver.Server.mutationsWithoutWALCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "regionserver.Server.mutationsWithoutWALSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/percentFilesLocal": {
+              "metric": "regionserver.Server.percentFilesLocal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/readRequestCount": {
+              "metric": "regionserver.Server.readRequestCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/regions": {
+              "metric": "regionserver.Server.regionCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/regionServerStartTime": {
+              "metric": "regionserver.Server.regionServerStartTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "regionserver.Server.slowAppendCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowDeleteCount": {
+              "metric": "regionserver.Server.slowDeleteCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowGetCount": {
+              "metric": "regionserver.Server.slowGetCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "regionserver.Server.slowIncrementCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "regionserver.Server.slowPutCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/staticBloomSize": {
+              "metric": "regionserver.Server.staticBloomSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/staticIndexSize": {
+              "metric": "regionserver.Server.staticIndexSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/storeCount": {
+              "metric": "regionserver.Server.storeCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/storefiles": {
+              "metric": "regionserver.Server.storeFileCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/storeFileIndexSize": {
+              "metric": "regionserver.Server.storeFileIndexSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/storeFileSize": {
+              "metric": "regionserver.Server.storeFileSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/requests": {
+              "metric": "regionserver.Server.totalRequestCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/updatesBlockedTime": {
+              "metric": "regionserver.Server.updatesBlockedTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/writeRequestCount": {
+              "metric": "regionserver.Server.writeRequestCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendSize_75th_percentile": {
+              "metric": "regionserver.WAL.AppendSize_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendSize_95th_percentile": {
+              "metric": "regionserver.WAL.AppendSize_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendSize_99th_percentile": {
+              "metric": "regionserver.WAL.AppendSize_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendSize_max": {
+              "metric": "regionserver.WAL.AppendSize_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendSize_mean": {
+              "metric": "regionserver.WAL.AppendSize_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendSize_median": {
+              "metric": "regionserver.WAL.AppendSize_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendSize_min": {
+              "metric": "regionserver.WAL.AppendSize_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendSize_num_ops": {
+              "metric": "regionserver.WAL.AppendSize_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendTime_75th_percentile": {
+              "metric": "regionserver.WAL.AppendTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendTime_95th_percentile": {
+              "metric": "regionserver.WAL.AppendTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendTime_99th_percentile": {
+              "metric": "regionserver.WAL.AppendTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendTime_max": {
+              "metric": "regionserver.WAL.AppendTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendTime_mean": {
+              "metric": "regionserver.WAL.AppendTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendTime_median": {
+              "metric": "regionserver.WAL.AppendTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendTime_min": {
+              "metric": "regionserver.WAL.AppendTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/AppendTime_num_ops": {
+              "metric": "regionserver.WAL.AppendTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/SyncTime_75th_percentile": {
+              "metric": "regionserver.WAL.SyncTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/SyncTime_95th_percentile": {
+              "metric": "regionserver.WAL.SyncTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/SyncTime_99th_percentile": {
+              "metric": "regionserver.WAL.SyncTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/SyncTime_max": {
+              "metric": "regionserver.WAL.SyncTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/SyncTime_mean": {
+              "metric": "regionserver.WAL.SyncTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/SyncTime_median": {
+              "metric": "regionserver.WAL.SyncTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/SyncTime_min": {
+              "metric": "regionserver.WAL.SyncTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/SyncTime_num_ops": {
+              "metric": "regionserver.WAL.SyncTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/appendCount": {
+              "metric": "regionserver.WAL.appendCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/wal/slowAppendCount": {
+              "metric": "regionserver.WAL.slowAppendCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ugi/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ugi/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ugi/LoginFailureAvgTime": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ugi/LoginFailureNumOps": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ugi/LoginSuccessAvgTime": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ugi/LoginSuccessNumOps": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/hbase/ipc/ProcessCallTime_75th_percentile": {
+              "metric": "ipc.IPC.ProcessCallTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_95th_percentile": {
+              "metric": "ipc.IPC.ProcessCallTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_99th_percentile": {
+              "metric": "ipc.IPC.ProcessCallTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_max": {
+              "metric": "ipc.IPC.ProcessCallTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_mean": {
+              "metric": "ipc.IPC.ProcessCallTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_median": {
+              "metric": "ipc.IPC.ProcessCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_min": {
+              "metric": "ipc.IPC.ProcessCallTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/ProcessCallTime_num_ops": {
+              "metric": "ipc.IPC.ProcessCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_75th_percentile": {
+              "metric": "ipc.IPC.QueueCallTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_95th_percentile": {
+              "metric": "ipc.IPC.QueueCallTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_99th_percentile": {
+              "metric": "ipc.IPC.QueueCallTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_max": {
+              "metric": "ipc.IPC.QueueCallTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_mean": {
+              "metric": "ipc.IPC.QueueCallTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_median": {
+              "metric": "ipc.IPC.QueueCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_min": {
+              "metric": "ipc.IPC.QueueCallTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/QueueCallTime_num_ops": {
+              "metric": "ipc.IPC.QueueCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/authenticationFailures": {
+              "metric": "ipc.IPC.authenticationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/authenticationSuccesses": {
+              "metric": "ipc.IPC.authenticationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/authorizationFailures": {
+              "metric": "ipc.IPC.authorizationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/authorizationSuccesses": {
+              "metric": "ipc.IPC.authorizationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/numActiveHandler": {
+              "metric": "ipc.IPC.numActiveHandler",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/numCallsInGeneralQueue": {
+              "metric": "ipc.IPC.numCallsInGeneralQueue",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/numCallsInPriorityQueue": {
+              "metric": "ipc.IPC.numCallsInPriorityQueue",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/numCallsInReplicationQueue": {
+              "metric": "ipc.IPC.numCallsInReplicationQueue",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/numOpenConnections": {
+              "metric": "ipc.IPC.numOpenConnections",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/queueSize": {
+              "metric": "ipc.IPC.queueSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/receivedBytes": {
+              "metric": "ipc.IPC.receivedBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/sentBytes": {
+              "metric": "ipc.IPC.sentBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/GcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/GcCountConcurrentMarkSweep": {
+              "metric": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/GcCountParNew": {
+              "metric": "jvm.JvmMetrics.GcCountParNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/GcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/GcTimeMillisConcurrentMarkSweep": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/GcTimeMillisParNew": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisParNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/LogError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/LogFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/LogInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/LogWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/MemNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/ThreadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/ThreadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/ThreadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/ThreadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/ThreadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/jvm/ThreadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_75th_percentile": {
+              "metric": "master.AssignmentManger.Assign_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_95th_percentile": {
+              "metric": "master.AssignmentManger.Assign_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_99th_percentile": {
+              "metric": "master.AssignmentManger.Assign_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_max": {
+              "metric": "master.AssignmentManger.Assign_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_mean": {
+              "metric": "master.AssignmentManger.Assign_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_median": {
+              "metric": "master.AssignmentManger.Assign_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_min": {
+              "metric": "master.AssignmentManger.Assign_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/Assign_num_ops": {
+              "metric": "master.AssignmentManger.Assign_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_75th_percentile": {
+              "metric": "master.AssignmentManger.BulkAssign_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_95th_percentile": {
+              "metric": "master.AssignmentManger.BulkAssign_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_99th_percentile": {
+              "metric": "master.AssignmentManger.BulkAssign_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_max": {
+              "metric": "master.AssignmentManger.BulkAssign_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_mean": {
+              "metric": "master.AssignmentManger.BulkAssign_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_median": {
+              "metric": "master.AssignmentManger.BulkAssign_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_min": {
+              "metric": "master.AssignmentManger.BulkAssign_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/BulkAssign_num_ops": {
+              "metric": "master.AssignmentManger.BulkAssign_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/ritCount": {
+              "metric": "master.AssignmentManger.ritCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/ritCountOverThreshold": {
+              "metric": "master.AssignmentManger.ritCountOverThreshold",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/assignmentManger/ritOldestAge": {
+              "metric": "master.AssignmentManger.ritOldestAge",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_75th_percentile": {
+              "metric": "master.Balancer.BalancerCluster_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_95th_percentile": {
+              "metric": "master.Balancer.BalancerCluster_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_99th_percentile": {
+              "metric": "master.Balancer.BalancerCluster_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_max": {
+              "metric": "master.Balancer.BalancerCluster_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_mean": {
+              "metric": "master.Balancer.BalancerCluster_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_median": {
+              "metric": "master.Balancer.BalancerCluster_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_min": {
+              "metric": "master.Balancer.BalancerCluster_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/BalancerCluster_num_ops": {
+              "metric": "master.Balancer.BalancerCluster_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/balancer/miscInvocationCount": {
+              "metric": "master.Balancer.miscInvocationCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_75th_percentile": {
+              "metric": "master.FileSystem.HlogSplitSize_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_95th_percentile": {
+              "metric": "master.FileSystem.HlogSplitSize_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_99th_percentile": {
+              "metric": "master.FileSystem.HlogSplitSize_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_max": {
+              "metric": "master.FileSystem.HlogSplitSize_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_mean": {
+              "metric": "master.FileSystem.HlogSplitSize_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_median": {
+              "metric": "master.FileSystem.HlogSplitSize_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_min": {
+              "metric": "master.FileSystem.HlogSplitSize_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitSize_num_ops": {
+              "metric": "master.FileSystem.HlogSplitSize_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_75th_percentile": {
+              "metric": "master.FileSystem.HlogSplitTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_95th_percentile": {
+              "metric": "master.FileSystem.HlogSplitTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_99th_percentile": {
+              "metric": "master.FileSystem.HlogSplitTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_max": {
+              "metric": "master.FileSystem.HlogSplitTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_mean": {
+              "metric": "master.FileSystem.HlogSplitTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_median": {
+              "metric": "master.FileSystem.HlogSplitTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_min": {
+              "metric": "master.FileSystem.HlogSplitTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/HlogSplitTime_num_ops": {
+              "metric": "master.FileSystem.HlogSplitTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_75th_percentile": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_95th_percentile": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_99th_percentile": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_max": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_mean": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_median": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_min": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitSize_num_ops": {
+              "metric": "master.FileSystem.MetaHlogSplitSize_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_75th_percentile": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_95th_percentile": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_99th_percentile": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_max": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_mean": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_median": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_min": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/fileSystem/MetaHlogSplitTime_num_ops": {
+              "metric": "master.FileSystem.MetaHlogSplitTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/AverageLoad": {
+              "metric": "master.Server.averageLoad",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/clusterRequests": {
+              "metric": "master.Server.clusterRequests",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/masterActiveTime": {
+              "metric": "master.Server.masterActiveTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/masterStartTime": {
+              "metric": "master.Server.masterStartTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/numDeadRegionServers": {
+              "metric": "master.Server.numDeadRegionServers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/master/numRegionServers": {
+              "metric": "master.Server.numRegionServers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/metricssystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_75th_percentile": {
+              "metric": "regionserver.Server.Append_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_95th_percentile": {
+              "metric": "regionserver.Server.Append_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_99th_percentile": {
+              "metric": "regionserver.Server.Append_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_max": {
+              "metric": "regionserver.Server.Append_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_mean": {
+              "metric": "regionserver.Server.Append_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_median": {
+              "metric": "regionserver.Server.Append_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_min": {
+              "metric": "regionserver.Server.Append_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Append_num_ops": {
+              "metric": "regionserver.Server.Append_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_75th_percentile": {
+              "metric": "regionserver.Server.Delete_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_95th_percentile": {
+              "metric": "regionserver.Server.Delete_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_99th_percentile": {
+              "metric": "regionserver.Server.Delete_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_max": {
+              "metric": "regionserver.Server.Delete_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_mean": {
+              "metric": "regionserver.Server.Delete_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_median": {
+              "metric": "regionserver.Server.Delete_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_min": {
+              "metric": "regionserver.Server.Delete_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Delete_num_ops": {
+              "metric": "regionserver.Server.Delete_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_75th_percentile": {
+              "metric": "regionserver.Server.Get_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_95th_percentile": {
+              "metric": "regionserver.Server.Get_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_99th_percentile": {
+              "metric": "regionserver.Server.Get_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_max": {
+              "metric": "regionserver.Server.Get_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_mean": {
+              "metric": "regionserver.Server.Get_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_median": {
+              "metric": "regionserver.Server.Get_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_min": {
+              "metric": "regionserver.Server.Get_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Get_num_ops": {
+              "metric": "regionserver.Server.Get_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_75th_percentile": {
+              "metric": "regionserver.Server.Increment_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_95th_percentile": {
+              "metric": "regionserver.Server.Increment_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_99th_percentile": {
+              "metric": "regionserver.Server.Increment_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_max": {
+              "metric": "regionserver.Server.Increment_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_mean": {
+              "metric": "regionserver.Server.Increment_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_median": {
+              "metric": "regionserver.Server.Increment_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_min": {
+              "metric": "regionserver.Server.Increment_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Increment_num_ops": {
+              "metric": "regionserver.Server.Increment_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_75th_percentile": {
+              "metric": "regionserver.Server.Mutate_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_95th_percentile": {
+              "metric": "regionserver.Server.Mutate_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_99th_percentile": {
+              "metric": "regionserver.Server.Mutate_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_max": {
+              "metric": "regionserver.Server.Mutate_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_mean": {
+              "metric": "regionserver.Server.Mutate_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_median": {
+              "metric": "regionserver.Server.Mutate_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_min": {
+              "metric": "regionserver.Server.Mutate_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Mutate_num_ops": {
+              "metric": "regionserver.Server.Mutate_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Replay_75th_percentile": {
+              "metric": "regionserver.Server.Replay_75th_percentile",
+              "pointI

<TRUNCATED>

[21/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server_upgrade.py
new file mode 100755
index 0000000..aa02f64
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server_upgrade.py
@@ -0,0 +1,300 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import glob
+import os
+import shutil
+import tarfile
+import tempfile
+
+from resource_management.core import shell
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute, Directory
+from resource_management.libraries.functions import Direction
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import compare_versions
+from resource_management.libraries.functions import format_stack_version
+from resource_management.libraries.functions import tar_archive
+from resource_management.libraries.functions import stack_select
+import oozie
+
+BACKUP_TEMP_DIR = "oozie-upgrade-backup"
+BACKUP_CONF_ARCHIVE = "oozie-conf-backup.tar"
+
+
+def backup_configuration():
+  """
+  Backs up the oozie configuration as part of the upgrade process.
+  :return:
+  """
+  Logger.info('Backing up Oozie configuration directory before upgrade...')
+  directoryMappings = _get_directory_mappings()
+
+  absolute_backup_dir = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
+  if not os.path.isdir(absolute_backup_dir):
+    os.makedirs(absolute_backup_dir)
+
+  for directory in directoryMappings:
+    if not os.path.isdir(directory):
+      raise Fail("Unable to backup missing directory {0}".format(directory))
+
+    archive = os.path.join(absolute_backup_dir, directoryMappings[directory])
+    Logger.info('Compressing {0} to {1}'.format(directory, archive))
+
+    if os.path.exists(archive):
+      os.remove(archive)
+
+    # backup the directory, following symlinks instead of including them
+    tar_archive.archive_directory_dereference(archive, directory)
+
+
+def restore_configuration():
+  """
+  Restores the configuration backups to their proper locations after an
+  upgrade has completed.
+  :return:
+  """
+  Logger.info('Restoring Oozie configuration directory after upgrade...')
+  directoryMappings = _get_directory_mappings()
+
+  for directory in directoryMappings:
+    archive = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR,
+      directoryMappings[directory])
+
+    if not os.path.isfile(archive):
+      raise Fail("Unable to restore missing backup archive {0}".format(archive))
+
+    Logger.info('Extracting {0} to {1}'.format(archive, directory))
+
+    tar_archive.untar_archive(archive, directory)
+
+  # cleanup
+  Directory(os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR),
+            action="delete"
+  )
+
+
+def prepare_libext_directory():
+  """
+  Creates /usr/iop/current/oozie/libext-customer with 777 permissions and
+  copies the required shared libraries and the ext ZIP into it.
+  :return:
+  """
+  import params
+
+  # some versions of IOP don't need the lzo compression libraries
+  target_version_needs_compression_libraries = compare_versions(
+    format_stack_version(params.version), '4.0.0.0') >= 0
+
+  if not os.path.isdir(params.oozie_libext_customer_dir):
+    os.makedirs(params.oozie_libext_customer_dir, 0o777)
+
+  # ensure that it's rwx for all
+  os.chmod(params.oozie_libext_customer_dir, 0o777)
+
+  # get all hadooplzo* JAR files
+  # stack-select set hadoop-client has not run yet, therefore we cannot use
+  # /usr/iop/current/hadoop-client ; we must use params.version directly
+  # however, this only works when upgrading beyond 4.0.0.0; don't do this
+  # for downgrade to 4.0.0.0 since hadoop-lzo will not be present
+  # This can also be called during a Downgrade.
+  # When a version is Installed, it is responsible for downloading the hadoop-lzo packages
+  # if lzo is enabled.
+  if params.lzo_enabled and (params.upgrade_direction == Direction.UPGRADE or target_version_needs_compression_libraries):
+    hadoop_lzo_pattern = 'hadoop-lzo*.jar'
+    hadoop_client_new_lib_dir = format("/usr/iop/{version}/hadoop/lib")
+
+    files = glob.glob(os.path.join(hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+    if not files:
+      raise Fail("There are no files at {0} matching {1}".format(
+        hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+
+    # copy files into libext
+    files_copied = False
+    for file in files:
+      if os.path.isfile(file):
+        Logger.info("Copying {0} to {1}".format(str(file), params.oozie_libext_customer_dir))
+        shutil.copy2(file, params.oozie_libext_customer_dir)
+        files_copied = True
+
+    if not files_copied:
+      raise Fail("There are no files at {0} matching {1}".format(
+        hadoop_client_new_lib_dir, hadoop_lzo_pattern))
+
+  # copy ext ZIP to customer dir
+  oozie_ext_zip_file = '/usr/share/IOP-oozie/ext-2.2.zip'
+  if not os.path.isfile(oozie_ext_zip_file):
+    raise Fail("Unable to copy {0} because it does not exist".format(oozie_ext_zip_file))
+
+  Logger.info("Copying {0} to {1}".format(oozie_ext_zip_file, params.oozie_libext_customer_dir))
+  shutil.copy2(oozie_ext_zip_file, params.oozie_libext_customer_dir)
+
+def upgrade_oozie_database_and_sharelib():
+  """
+  Performs the creation and upload of the sharelib and the upgrade of the
+  database. This method will also perform a kinit if necessary.
+  It is run before the upgrade of oozie begins exactly once as part of the
+  upgrade orchestration.
+
+  Since this runs before the upgrade has occurred, it should not use any
+  "current" directories since they will still be pointing to the older
+  version of Oozie. Instead, it should use versioned directories to ensure
+  that the commands running are from the oozie version about to be upgraded to.
+  :return:
+  """
+  import params
+  # get the kerberos token if necessary to execute commands as oozie
+  if params.security_enabled:
+    oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+    command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
+    Execute(command, user=params.oozie_user)
+
+  upgrade_stack = stack_select._get_upgrade_stack()
+  if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
+    raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+  stack_version = upgrade_stack[1]
+
+  # upgrade oozie DB
+  Logger.info('Upgrading the Oozie database...')
+  oozie.download_database_library_if_needed()
+  database_upgrade_command = "/usr/iop/{0}/oozie/bin/ooziedb.sh upgrade -run".format(stack_version)
+  Execute(database_upgrade_command, user=params.oozie_user, logoutput=True)
+  create_sharelib()
+
+def create_sharelib():
+  """
+  Performs the creation and upload of the sharelib.
+  This method will also perform a kinit if necessary.
+  It is run before the upgrade of oozie begins exactly once as part of the
+  upgrade orchestration.
+
+  Since this runs before the upgrade has occurred, it should not use any
+  "current" directories since they will still be pointing to the older
+  version of Oozie. Instead, it should use versioned directories to ensure
+  that the commands running are from the oozie version about to be upgraded to.
+  """
+  import params
+  Logger.info('Creating a new sharelib and uploading it to HDFS...')
+  # ensure the oozie directory exists for the sharelib
+  params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
+    action = "create_on_execute",
+    type = "directory",
+    owner = "oozie",
+    group = "hadoop",
+    mode = 0755,
+    recursive_chmod = True)
+
+  params.HdfsResource(None, action = "execute")
+
+  upgrade_stack = stack_select._get_upgrade_stack()
+  if upgrade_stack is None or upgrade_stack[1] is None:
+    raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
+
+  stack_version = upgrade_stack[1]
+
+  # install new sharelib to HDFS
+  sharelib_command = "/usr/iop/{0}/oozie/bin/oozie-setup.sh sharelib create -fs {1}".format(
+    stack_version, params.fs_root)
+  Execute(sharelib_command, user=params.oozie_user, logoutput=True)
+
+
+def upgrade_oozie():
+  """
+  Performs the upgrade of the oozie WAR file and database.
+  :return:
+  """
+  import params
+
+  # get the kerberos token if necessary to execute commands as oozie
+  if params.security_enabled:
+    oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+    command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
+    Execute(command, user=params.oozie_user)
+
+  # ensure that HDFS is prepared to receive the new sharelib
+  command = format("hdfs dfs -chown {params.oozie_user}:{params.user_group} {oozie_hdfs_user_dir}/share")
+  Execute(command, user=params.oozie_user)
+
+  command = format("hdfs dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
+  Execute(command, user=params.oozie_user)
+
+  # upgrade oozie DB
+  command = format("{oozie_home}/bin/ooziedb.sh upgrade -run")
+  Execute(command, user=params.oozie_user)
+
+  # prepare the oozie WAR
+  '''command = format("{oozie_setup_sh} prepare-war -d {oozie_libext_customer_dir}")
+  return_code, oozie_output = shell.call(command)
+
+  if return_code != 0 or "New Oozie WAR file with added" not in oozie_output:
+    message = "Unexpected Oozie WAR preparation output {0}".format(oozie_output)
+    Logger.error(message)
+    raise Fail(message)'''
+
+  # install new sharelib to HDFS
+  command = format("{oozie_setup_sh} sharelib create -fs {fs_root}")
+  Execute(command, user=params.oozie_user)
+
+
+def _get_directory_mappings():
+  """
+  Gets a dictionary of directory to archive name that represents the
+  directories that need to be backed up and their output tarball archive targets
+  :return:  the dictionary of directory to tarball mappings
+  """
+  import params
+
+  return { params.conf_dir : BACKUP_CONF_ARCHIVE }
+
+def prepare_warfile():
+  """
+  Invokes the 'prepare-war' command in Oozie in order to create the WAR.
+  The prepare-war command uses the input WAR from ${OOZIE_HOME}/oozie.war and
+  outputs the prepared WAR to ${CATALINA_BASE}/webapps/oozie.war - because of this,
+  both of these environment variables must point to the upgraded oozie-server path and
+  not oozie-client since it was not yet updated.
+
+  This method will also perform a kinit if necessary.
+  :return:
+  """
+  import params
+
+  # get the kerberos token if necessary to execute commands as oozie
+  if params.security_enabled:
+    oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+    command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
+    Execute(command, user=params.oozie_user, logoutput=True)
+
+  # setup environment
+  environment = { "CATALINA_BASE" : "/usr/iop/current/oozie-server/oozie-server",
+    "OOZIE_HOME" : "/usr/iop/current/oozie-server" }
+
+  # prepare the oozie WAR
+  command = format("{oozie_setup_sh} prepare-war")
+  return_code, oozie_output = shell.call(command, user=params.oozie_user,
+    logoutput=True, quiet=False, env=environment)
+
+  # set it to "" to prevent a possible iteration issue
+  if oozie_output is None:
+    oozie_output = ""
+
+  if return_code != 0 or "New Oozie WAR file with added".lower() not in oozie_output.lower():
+    message = "Unexpected Oozie WAR preparation output {0}".format(oozie_output)
+    Logger.error(message)
+    raise Fail(message)
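
For context, the backup/restore pair above leans on Ambari's resource_management
helpers (tar_archive, Directory). A minimal standalone sketch of the same round
trip, using only the Python standard library and illustrative paths rather than
the stack's real conf_dir, could look like this:

# Standalone sketch of the backup/restore round trip performed by
# backup_configuration()/restore_configuration() above; directory and file
# names are illustrative, not taken from params.
import os
import tarfile
import tempfile

BACKUP_TEMP_DIR = "oozie-upgrade-backup"
BACKUP_CONF_ARCHIVE = "oozie-conf-backup.tar"

def backup_dir(src_dir):
    """Archive src_dir under the system temp dir, dereferencing symlinks."""
    backup_root = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
    if not os.path.isdir(backup_root):
        os.makedirs(backup_root)
    archive = os.path.join(backup_root, BACKUP_CONF_ARCHIVE)
    # dereference=True mirrors archive_directory_dereference(): symlinked
    # config files are stored as regular files rather than as links.
    tar = tarfile.open(archive, "w", dereference=True)
    try:
        tar.add(src_dir, arcname=".")
    finally:
        tar.close()
    return archive

def restore_dir(archive, dest_dir):
    """Unpack a previously created backup archive into dest_dir."""
    tar = tarfile.open(archive, "r")
    try:
        tar.extractall(dest_dir)
    finally:
        tar.close()

if __name__ == "__main__":
    conf_dir = tempfile.mkdtemp(prefix="oozie-conf-")  # stand-in for params.conf_dir
    open(os.path.join(conf_dir, "oozie-site.xml"), "w").close()
    restore_dir(backup_dir(conf_dir), tempfile.mkdtemp(prefix="oozie-conf-restored-"))

The real functions additionally fail fast when a mapped directory or archive is
missing and delete the temporary backup directory once the restore succeeds.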

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_service.py
new file mode 100755
index 0000000..485686f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_service.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import time
+from resource_management import *
+from resource_management.core.shell import as_user
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+from ambari_commons import OSConst
+
+def oozie_service(action = 'start', upgrade_type=None):
+  """
+  Starts or stops the Oozie service
+  :param action: 'start' or 'stop'
+  :param upgrade_type: type of upgrade, either "rolling" or "non_rolling"; when set,
+  certain checks are skipped since a variation of them was performed during the rolling upgrade
+  :return:
+  """
+  import params
+
+  environment={'OOZIE_CONFIG': params.conf_dir}
+
+  if params.security_enabled:
+    if params.oozie_principal is None:
+      oozie_principal_with_host = 'missing_principal'
+    else:
+      oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
+    kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host};")
+  else:
+    kinit_if_needed = ""
+
+  no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
+
+  if action == 'start':
+    start_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-start.sh")
+
+    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
+       params.jdbc_driver_name == "org.postgresql.Driver" or \
+       params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{target} org.apache.ambari.server.DBConnectionVerification '{oozie_jdbc_connection_url}' {oozie_metastore_user_name} {oozie_metastore_user_passwd!p} {jdbc_driver_name}")
+    else:
+      db_connection_check_command = None
+
+    if upgrade_type is None:
+
+      if not os.path.isfile(params.target) and params.jdbc_driver_name == "org.postgresql.Driver":
+        print format("ERROR: jdbc file {target} is unavailable. Please, follow next steps:\n" \
+          "1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create needed directory: mkdir -p {oozie_home}/libserver/\n" \
+          "3) Copy postgresql-9.0-801.jdbc4.jar to newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
+          "{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
+          "/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
+        exit(1)
+
+      if db_connection_check_command:
+        Execute( db_connection_check_command,
+                 tries=5,
+                 try_sleep=10,
+                 user=params.oozie_user,
+        )
+
+      Execute( format("cd {oozie_tmp_dir} && {oozie_home}/bin/ooziedb.sh create -sqlfile oozie.sql -run"),
+               user = params.oozie_user, not_if = no_op_test,
+               ignore_failures = True
+      )
+
+      if params.security_enabled:
+        Execute(kinit_if_needed,
+                user = params.oozie_user,
+        )
+      if params.host_sys_prepped:
+        print "Skipping creation of oozie sharelib as host is sys prepped"
+        hdfs_share_dir_exists = True # skip time-expensive hadoop fs -ls check
+      elif WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+        # check with webhdfs is much faster than executing hadoop fs -ls.
+        util = WebHDFSUtil(params.hdfs_site, params.oozie_user, params.security_enabled)
+        list_status = util.run_command(params.hdfs_share_dir, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
+        hdfs_share_dir_exists = ('FileStatus' in list_status)
+      else:
+        # have to do the time-expensive hadoop fs -ls check.
+        hdfs_share_dir_exists = shell.call(format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls {hdfs_share_dir} | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
+                                 user=params.oozie_user)[0]
+
+      if not hdfs_share_dir_exists:
+        Execute( params.put_shared_lib_to_hdfs_cmd,
+                 user = params.oozie_user,
+                 path = params.execute_path
+        )
+        params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
+                             type="directory",
+                             action="create_on_execute",
+                             mode=0755,
+                             recursive_chmod=True,
+        )
+        params.HdfsResource(None, action="execute")
+
+
+    # start oozie
+    Execute( start_cmd, environment=environment, user = params.oozie_user,
+             not_if = no_op_test )
+
+  elif action == 'stop':
+    stop_cmd  = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-stop.sh")
+    # stop oozie
+    Execute(stop_cmd, environment=environment, only_if  = no_op_test,
+      user = params.oozie_user)
+    File(params.pid_file, action = "delete")
+    # Wait a bit longer for the database (Derby) to shut down completely, since it only allows one connected JVM.
+    time.sleep(10)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/params.py
new file mode 100755
index 0000000..febd3e6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/params.py
@@ -0,0 +1,259 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.core import System
+from resource_management.libraries import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import get_port_from_url
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.resources import HdfsResource
+from resource_management.libraries.functions import conf_select, stack_select
+from urlparse import urlparse
+import status_params
+import itertools
+import os
+import re
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+hostname = config["hostname"]
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = default("/hostLevelParams/stack_name", None)
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+#hadoop params
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+
+
+# if this is a server action, then use the server binaries; smoke tests
+# use the client binaries
+server_role_dir_mapping = { 'OOZIE_SERVER' : 'oozie-server',
+  'OOZIE_SERVICE_CHECK' : 'oozie-client' }
+
+command_role = default("/role", "")
+if command_role not in server_role_dir_mapping:
+  command_role = 'OOZIE_SERVICE_CHECK'
+
+oozie_root = server_role_dir_mapping[command_role]
+
+
+# using the correct oozie root dir, format the correct location
+oozie_lib_dir = format("/usr/iop/current/{oozie_root}")
+oozie_setup_sh = format("/usr/iop/current/{oozie_root}/bin/oozie-setup.sh")
+oozie_webapps_dir = format("/usr/iop/current/{oozie_root}/oozie-server/webapps")
+oozie_webapps_conf_dir = format("/usr/iop/current/{oozie_root}/oozie-server/conf")
+oozie_libext_dir = format("/usr/iop/current/{oozie_root}/libext")
+#oozie_libext_customer_dir = format("/usr/iop/current/{oozie_root}/libext-customer")
+oozie_server_dir = format("/usr/iop/current/{oozie_root}/oozie-server")
+oozie_shared_lib = format("/usr/iop/current/{oozie_root}/share")
+oozie_home = format("/usr/iop/current/{oozie_root}")
+oozie_bin_dir = format("/usr/iop/current/{oozie_root}/bin")
+oozie_examples_regex = format("/usr/iop/current/{oozie_root}/doc")
+
+# set the falcon home for copying JARs; if in an upgrade, then use the version of falcon that matches the version of oozie
+falcon_home = '/usr/iop/current/falcon-client'
+if stack_version is not None:
+  falcon_home = '/usr/iop/{0}/falcon'.format(stack_version)
+execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
+
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+conf_dir = status_params.conf_dir
+hive_conf_dir = format("{conf_dir}/action-conf/hive")
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+oozie_admin_users = format(config['configurations']['oozie-env']['oozie_admin_users'])
+user_group = config['configurations']['cluster-env']['user_group']
+jdk_location = config['hostLevelParams']['jdk_location']
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+oozie_tmp_dir = "/var/tmp/oozie"
+oozie_hdfs_user_dir = format("/user/{oozie_user}")
+oozie_pid_dir = status_params.oozie_pid_dir
+pid_file = status_params.pid_file
+hadoop_jar_location = "/usr/lib/hadoop/"
+java_share_dir = "/usr/share/java"
+# Dependency on ext_js is not supported on IOP
+#ext_js_file = "ext-2.2.zip"
+#ext_js_path = "/usr/share/iop-oozie/ext-2.2.zip"
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+oozie_initial_heapsize = str(config['configurations']['oozie-env']['oozie_initial_heapsize']).rstrip('m') + 'm'
+oozie_heapsize = str(config['configurations']['oozie-env']['oozie_heapsize']).rstrip('m') + 'm'
+oozie_permsize = str(config['configurations']['oozie-env']['oozie_permsize']).rstrip('m') + 'm'
+
+kinit_path_local = get_kinit_path()
+oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
+oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
+http_principal = config['configurations']['oozie-site']['oozie.authentication.kerberos.principal']
+oozie_site = config['configurations']['oozie-site']
+if security_enabled:
+  # older versions of Oozie have problems when _HOST is used in the principal;
+  # testing shows that newer versions of Oozie also need this replacement
+  oozie_site = dict(config['configurations']['oozie-site'])
+  oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = \
+    oozie_principal.replace('_HOST', hostname)
+  oozie_site['oozie.authentication.kerberos.principal'] = \
+    http_principal.replace('_HOST', hostname)
+
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+oozie_keytab = default("/configurations/oozie-env/oozie_keytab", oozie_service_keytab)
+oozie_env_sh_template = config['configurations']['oozie-env']['content']
+
+oracle_driver_jar_name = "ojdbc6.jar"
+
+java_home = config['hostLevelParams']['java_home']
+java_version = config['hostLevelParams']['java_version']
+oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
+oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
+oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
+oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
+oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
+oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
+oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
+if 'export OOZIE_HTTPS_PORT' in oozie_env_sh_template or \
+   'oozie.https.port' in config['configurations']['oozie-site'] or \
+   'oozie.https.keystore.file' in config['configurations']['oozie-site'] or \
+   'oozie.https.keystore.pass' in config['configurations']['oozie-site']:
+  oozie_secure = '-secure'
+else:
+  oozie_secure = ''
+fs_root = config['configurations']['core-site']['fs.defaultFS']
+
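+# command used during server start to upload the Oozie sharelib to HDFS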
+put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
+
+jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
+
+if jdbc_driver_name == "com.mysql.jdbc.Driver":
+  #jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
+  jdbc_driver_jar = "mysql-connector-java.jar"
+  jdbc_symlink_name = "mysql-jdbc-driver.jar"
+elif jdbc_driver_name == "org.postgresql.Driver":
+  jdbc_driver_jar = format("{oozie_home}/libserver/postgresql-9.0-801.jdbc4.jar")  #oozie using it's own postgres jdbc
+  jdbc_symlink_name = "postgres-jdbc-driver.jar"
+elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+  #jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
+  jdbc_driver_jar = "ojdbc.jar"
+  jdbc_symlink_name = "oracle-jdbc-driver.jar"
+else:
+  jdbc_driver_jar = ""
+  jdbc_symlink_name = ""
+
+driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
+driver_curl_target = format("{java_share_dir}/{jdbc_driver_jar}")
+downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
+if jdbc_driver_name == "org.postgresql.Driver":
+  target = jdbc_driver_jar
+else:
+  target = format("{oozie_libext_dir}/{jdbc_driver_jar}")
+
+hostname = config["hostname"]
+hdfs_share_dir = format("{oozie_hdfs_user_dir}/share")
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+has_falcon_host = len(falcon_host) > 0
+
+#oozie-log4j.properties
+if (('oozie-log4j' in config['configurations']) and ('content' in config['configurations']['oozie-log4j'])):
+  log4j_props = config['configurations']['oozie-log4j']['content']
+else:
+  log4j_props = None
+
+oozie_hdfs_user_mode = 0775
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+
+https_port = None
+# try to get the https port from the oozie-env content
+for line in oozie_env_sh_template.splitlines():
+  result = re.match(r"export\s+OOZIE_HTTPS_PORT=(\d+)", line)
+  if result is not None:
+    https_port = result.group(1)
+# or from oozie-site.xml
+if https_port is None and 'oozie.https.port' in config['configurations']['oozie-site']:
+  https_port = config['configurations']['oozie-site']['oozie.https.port']
+
+oozie_base_url = config['configurations']['oozie-site']['oozie.base.url']
+
+# construct proper url for https
+if https_port is not None:
+  parsed_url = urlparse(oozie_base_url)
+  oozie_base_url = oozie_base_url.replace(parsed_url.scheme, "https")
+  if parsed_url.port is None:
+    oozie_base_url = oozie_base_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
+  else:
+    oozie_base_url = oozie_base_url.replace(str(parsed_url.port), str(https_port))
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
+
+#LZO support
+
+#-----LZO is not supported in the IOP distribution since it is GPL licensed--------
+
+'''
+io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
+lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+
+# stack_is_iop40_or_further
+underscored_version = stack_version_unformatted.replace('.', '_')
+dashed_version = stack_version_unformatted.replace('.', '-')
+lzo_packages_to_family = {
+  "any": ["hadoop-lzo", ],
+  "redhat": ["lzo", "hadoop-lzo-native"],
+  "suse": ["lzo", "hadoop-lzo-native"],
+  "ubuntu": ["liblzo2-2", ]
+}
+
+
+lzo_packages_to_family["redhat"] += [format("hadooplzo_{underscorred_version}_*")]
+lzo_packages_to_family["suse"] += [format("hadooplzo_{underscorred_version}_*")]
+lzo_packages_to_family["ubuntu"] += [format("hadooplzo_{dashed_version}_*")]
+
+lzo_packages_for_current_host = lzo_packages_to_family['any'] + lzo_packages_to_family[System.get_instance().os_family]
+all_lzo_packages = set(itertools.chain(*lzo_packages_to_family.values()))
+'''

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/service_check.py
new file mode 100755
index 0000000..fdfd552
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/service_check.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import glob
+
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources import File
+from resource_management.core.source import StaticFile
+from resource_management.core.system import System
+from resource_management.libraries.functions import format
+from resource_management.libraries.script import Script
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.exceptions import Fail
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+from resource_management.core.logger import Logger
+
+class OozieServiceCheck(Script):
+  pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class OozieServiceCheckDefault(OozieServiceCheck):
+
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    prepare_hdfs_file_name = 'prepareOozieHdfsDirectories.sh'
+    smoke_test_file_name = 'oozieSmoke2.sh'
+
+    if 'yarn-site' in params.config['configurations']:
+      XmlConfig("yarn-site.xml",
+                conf_dir=params.hadoop_conf_dir,
+                configurations=params.config['configurations']['yarn-site'],
+                owner=params.hdfs_user,
+                group=params.user_group,
+                mode=0644
+      )
+    else:
+      raise Fail("yarn-site.xml was not present in config parameters.")
+
+    OozieServiceCheckDefault.oozie_smoke_shell_file(smoke_test_file_name, prepare_hdfs_file_name)
+
+  @staticmethod
+  def oozie_smoke_shell_file(file_name, prepare_hdfs_file_name):
+    import params
+
+    File(format("{tmp_dir}/{file_name}"),
+         content=StaticFile(file_name),
+         mode=0755
+    )
+    File(format("{tmp_dir}/{prepare_hdfs_file_name}"),
+         content=StaticFile(prepare_hdfs_file_name),
+         mode=0755
+    )
+
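+    # locate the extracted Oozie examples; newer IOP packages place them in an oozie-4.2.0_IBM* subdirectory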
+    os_family = System.get_instance().os_family
+    oozie_examples_dir = glob.glob(params.oozie_examples_regex)[0]
+    oozie_examples_tar_file = os.path.join(oozie_examples_dir, "oozie-examples.tar.gz")
+    if not os.path.isfile(oozie_examples_tar_file):
+      oozie_examples_dir = glob.glob(os.path.join(oozie_examples_dir, "oozie-4.2.0_IBM*"))[0]
+
+    Execute(format("{tmp_dir}/{prepare_hdfs_file_name} {conf_dir} {oozie_examples_dir} {hadoop_conf_dir} "),
+            tries=3,
+            try_sleep=5,
+            logoutput=True
+    )
+
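+    # re-stage the example workflows and their input data under the smoke user's HDFS home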
+    examples_dir = format('/user/{smokeuser}/examples')
+    params.HdfsResource(examples_dir,
+                        action = "delete_on_execute",
+                        type = "directory"
+    )
+    params.HdfsResource(examples_dir,
+      action = "create_on_execute",
+      type = "directory",
+      source = format("{oozie_examples_dir}/examples"),
+      owner = params.smokeuser,
+      group = params.user_group
+    )
+
+    input_data_dir = format('/user/{smokeuser}/input-data')
+    params.HdfsResource(input_data_dir,
+                        action = "delete_on_execute",
+                        type = "directory"
+    )
+    params.HdfsResource(input_data_dir,
+      action = "create_on_execute",
+      type = "directory",
+      source = format("{oozie_examples_dir}/examples/input-data"),
+      owner = params.smokeuser,
+      group = params.user_group
+    )
+    params.HdfsResource(None, action="execute")
+
+    if params.security_enabled:
+      sh_cmd = format(
+        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}")
+    else:
+      sh_cmd = format(
+        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}")
+
+    Execute(sh_cmd,
+            path=params.execute_path,
+            tries=3,
+            try_sleep=5,
+            logoutput=True
+    )
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class OozieServiceCheckWindows(OozieServiceCheck):
+
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.hdp_root, "Run-SmokeTests.cmd")
+    service = "OOZIE"
+    Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+  OozieServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/status_params.py
new file mode 100755
index 0000000..58d5b86
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/status_params.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+# a map of the Ambari role to the component name
+# for use with /usr/iop/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'OOZIE_SERVER' : 'oozie-server',
+  'OOZIE_CLIENT' : 'oozie-client',
+  'OOZIE_SERVICE_CHECK' : 'oozie-client',
+  'ru_execute_tasks' : 'oozie-server'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "OOZIE_CLIENT")
+
+config = Script.get_config()
+
+oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
+pid_file = format("{oozie_pid_dir}/oozie.pid")
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+conf_dir = format("/usr/iop/current/{component_directory}/conf")
+tmp_dir = Script.get_tmp_dir()
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+hostname = config["hostname"]

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/adminusers.txt.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/adminusers.txt.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/adminusers.txt.j2
new file mode 100755
index 0000000..2a0f7b2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/adminusers.txt.j2
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Users should be set using the following rules:
+#
+#     One user name per line
+#     Empty lines and lines starting with '#' are ignored
+
+{% if oozie_admin_users %}
+{% for oozie_admin_user in oozie_admin_users.split(',') %}
+{{oozie_admin_user|trim}}
+{% endfor %}
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/oozie-log4j.properties.j2
new file mode 100755
index 0000000..e39428f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/templates/oozie-log4j.properties.j2
@@ -0,0 +1,93 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+log4j.appender.oozie.RollingPolicy.FileNamePattern=${log4j.appender.oozie.File}-%d{yyyy-MM-dd}
+log4j.appender.oozie.DatePattern='.'yyyy-MM-dd
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-env.xml
new file mode 100755
index 0000000..4cec418
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-env.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- pig-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for pig-env.sh file</description>
+    <value>
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+#if [ -d "/usr/lib/tez" ]; then
+#  PIG_OPTS="$PIG_OPTS -Dmapreduce.framework.name=yarn"
+#fi
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-log4j.xml
new file mode 100755
index 0000000..4f656f4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-log4j.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# ***** Set root logger level to DEBUG and its only appender to A.
+log4j.logger.org.apache.pig=info, A
+
+# ***** A is set to be a ConsoleAppender.
+log4j.appender.A=org.apache.log4j.ConsoleAppender
+# ***** A uses PatternLayout.
+log4j.appender.A.layout=org.apache.log4j.PatternLayout
+log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-properties.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-properties.xml
new file mode 100755
index 0000000..ea0fab5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/configuration/pig-properties.xml
@@ -0,0 +1,631 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Pig configuration file. All values can be overwritten by command line
+# arguments; for a description of the properties, run
+#
+#     pig -h properties
+#
+
+############################################################################
+#
+# == Logging properties
+#
+
+# Location of pig log file. If blank, a file with a timestamped slug
+# ('pig_1399336559369.log') will be generated in the current working directory.
+#
+# pig.logfile=
+# pig.logfile=/tmp/pig-err.log
+
+# Log4j configuration file. Set at runtime with the -4 parameter. The source
+# distribution has a ./conf/log4j.properties.template file you can rename and
+# customize.
+#
+# log4jconf=./conf/log4j.properties
+
+# Verbose Output.
+# * false (default): print only INFO and above to screen
+# * true: Print all log messages to screen
+#
+# verbose=false
+
+# Omit timestamps on log messages. (default: false)
+#
+# brief=false
+
+# Logging level. debug=OFF|ERROR|WARN|INFO|DEBUG (default: INFO)
+#
+# debug=INFO
+
+# Roll up warnings across tasks, so that when millions of mappers suddenly cry
+# out in error they are partially silenced. (default, recommended: true)
+#
+# aggregate.warning=true
+
+# Should DESCRIBE pretty-print its schema?
+# * false (default): print on a single-line, suitable for pasting back in to your script
+# * true (recommended): prints on multiple lines with indentation, much more readable
+#
+# pig.pretty.print.schema=false
+
+# === Profiling UDFs  ===
+
+# Turn on UDF timers? This will cause two counters to be
+# tracked for every UDF and LoadFunc in your script: approx_microsecs measures
+# approximate time spent inside a UDF approx_invocations reports the approximate
+# number of times the UDF was invoked.
+#
+# * false (default): do not record timing information of UDFs.
+# * true: report UDF performance. Uses more counters, but gives more insight
+#   into script operation
+#
+# pig.udf.profile=false
+
+# Specify frequency of profiling (default: every 100th).
+# pig.udf.profile.frequency=100
+
+############################################################################
+#
+# == Site-specific Properties
+#
+
+# Execution Mode. Local mode is much faster, but only suitable for small amounts
+# of data. Local mode interprets paths on the local file system; Mapreduce mode
+# on the HDFS. Read more under 'Execution Modes' within the Getting Started
+# documentation.
+#
+# * mapreduce (default): use the Hadoop cluster defined in your Hadoop config files
+# * local: use local mode
+#
+# exectype=mapreduce
+
+# Bootstrap file with default statements to execute in every Pig job, similar to
+# .bashrc.  If blank, uses the file '.pigbootup' from your home directory; If a
+# value is supplied, that file is NOT loaded.  This does not do tilde expansion
+# -- you must supply the full path to the file.
+#
+# pig.load.default.statements=
+# pig.load.default.statements=/home/bob/.pigrc
+
+# Kill all waiting/running MR jobs upon a MR job failure? (default: false) If
+# false, jobs that can proceed independently will do so unless a parent stage
+# fails. If true, the failure of any stage in the script kills all jobs.
+#
+# stop.on.failure=false
+
+# File containing the pig script to run. Rarely set in the properties file.
+# Commandline: -f
+#
+# file=
+
+# Jarfile to load, colon separated. Rarely used.
+#
+# jar=
+
+# Register additional .jar files to use with your Pig script.
+# Most typically used as a command line option (see http://pig.apache.org/docs/r0.12.0/basic.html#register):
+#
+#     pig -Dpig.additional.jars=hdfs://nn.mydomain.com:9020/myjars/my.jar
+#
+# pig.additional.jars=&lt;colon separated list of jars with optional wildcards&gt;
+# pig.additional.jars=/usr/local/share/pig/pig/contrib/piggybank/java/piggybank.jar:/usr/local/share/pig/datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar
+
+# Specify potential packages to which a UDF or a group of UDFs belong,
+# eliminating the need to qualify the UDF on every call. See
+# http://pig.apache.org/docs/r0.12.0/udf.html#use-short-names
+#
+# Commandline use:
+#
+#     pig \
+#       -Dpig.additional.jars=$PIG_HOME/contrib/piggybank/java/piggybank.jar:$PIG_HOME/../datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar \
+#       -Dudf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.util \
+#       happy_job.pig
+#
+# udf.import.list=&lt;colon separated list of imports&gt;
+# udf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.bags:datafu.pig.hash:datafu.pig.stats:datafu.pig.util
+
+#
+# Reuse jars across jobs run by the same user? (default: false) If enabled, jars
+# are placed in ${pig.user.cache.location}/${user.name}/.pigcache. Since most
+# jars change infrequently, this gives a minor speedup.
+#
+# pig.user.cache.enabled=false
+
+# Base path for storing jars cached by the pig.user.cache.enabled feature. (default: /tmp)
+#
+# pig.user.cache.location=/tmp
+
+# Replication factor for cached jars. If not specified mapred.submit.replication
+# is used, whose default is 10.
+#
+# pig.user.cache.replication=10
+
+# Default UTC offset. (default: the host's current UTC offset) Supply a UTC
+# offset in Java's timezone format: e.g., +08:00.
+#
+# pig.datetime.default.tz=
+
+############################################################################
+#
+# Memory impacting properties
+#
+
+# Amount of memory (as fraction of heap) allocated to bags before a spill is
+# forced. Default is 0.2, meaning 20% of available memory. Note that this memory
+# is shared across all large bags used by the application. See
+# http://pig.apache.org/docs/r0.12.0/perf.html#memory-management
+#
+# pig.cachedbag.memusage=0.2
+
+# Don't spill bags smaller than this size (bytes). Default: 5000000, or about
+# 5MB. Usually, the more spilling the longer runtime, so you might want to tune
+# it according to heap size of each task and so forth.
+#
+# pig.spill.size.threshold=5000000
+
+# EXPERIMENTAL: If a file bigger than this size (bytes) is spilled -- thus
+# freeing a bunch of ram -- tell the JVM to perform garbage collection.  This
+# should help reduce the number of files being spilled, but causes more-frequent
+# garbage collection. Default: 40000000 (about 40 MB)
+#
+# pig.spill.gc.activation.size=40000000
+
+# Maximum amount of data to replicate using the distributed cache when doing
+# fragment-replicated join. (default: 1000000000, about 1GB) Consider increasing
+# this in a production environment, but carefully.
+#
+# pig.join.replicated.max.bytes=1000000000
+
+# Fraction of heap available for the reducer to perform a skewed join. A low
+# fraction forces Pig to use more reducers, but increases the copying cost. See
+# http://pig.apache.org/docs/r0.12.0/perf.html#skewed-joins
+#
+# pig.skewedjoin.reduce.memusage=0.3
+
+#
+# === SchemaTuple ===
+#
+# The SchemaTuple feature (PIG-2632) uses a tuple's schema (when known) to
+# generate a custom Java class to hold records. Otherwise, tuples are loaded as
+# a plain list that is unaware of its contents' schema -- and so each element
+# has to be wrapped as a Java object on its own. This can provide more efficient
+# CPU utilization, serialization, and most of all memory usage.
+#
+# This feature is considered experimental and is off by default. You can
+# selectively enable it for specific operations using pig.schematuple.udf,
+# pig.schematuple.load, pig.schematuple.fr_join and pig.schematuple.merge_join
+#
+
+# Enable the SchemaTuple optimization in all available cases? (default: false; recommended: true)
+#
+# pig.schematuple=false
+
+# EXPERIMENTAL: Use SchemaTuples with UDFs (default: value of pig.schematuple).
+# pig.schematuple.udf=false
+
+# EXPERIMENTAL, CURRENTLY NOT IMPLEMENTED, but in the future, LoadFunc's with
+# known schemas should output SchemaTuples. (default: value of pig.schematuple)
+# pig.schematuple.load=false
+
+# EXPERIMENTAL: Use SchemaTuples in replicated joins. The potential memory
+# saving here is significant. (default: value of pig.schematuple)
+# pig.schematuple.fr_join=false
+
+# EXPERIMENTAL: Use SchemaTuples in merge joins. (default: value of pig.schematuple).
+# pig.schematuple.merge_join=false
+
+############################################################################
+#
+# Serialization options
+#
+
+# Omit empty part files from the output? (default: false)
+#
+# * false (default): reducers generates an output file, even if output is empty
+# * true (recommended): do not generate zero-byte part files
+#
+# The default behavior of MapReduce is to generate an empty file for no data, so
+# Pig follows that. But many small files can cause annoying extra map tasks and
+# put load on the HDFS, so consider setting this to 'true'
+#
+# pig.output.lazy=false
+
+#
+# === Tempfile Handling
+#
+
+# EXPERIMENTAL: Storage format for temporary files generated by intermediate
+# stages of Pig jobs. This can provide significant speed increases for certain
+# codecs, as reducing the amount of data transferred to and from disk can more
+# than make up for the cost of compression/decompression. Recommend that you set
+# up LZO compression in Hadoop and specify tfile storage.
+#
+# Compress temporary files?
+# * false (default): do not compress
+# * true (recommended): compress temporary files.
+#
+# pig.tmpfilecompression=false
+# pig.tmpfilecompression=true
+
+# Tempfile storage container type.
+#
+# * tfile (default, recommended): more efficient, but only supports gz(gzip) and lzo compression.
+#   https://issues.apache.org/jira/secure/attachment/12396286/TFile%20Specification%2020081217.pdf
+# * seqfile: only supports gz(gzip), lzo, snappy, and bzip2 compression
+#
+# pig.tmpfilecompression.storage=tfile
+
+# Codec types for intermediate job files. tfile supports gz(gzip) and lzo;
+# seqfile supports gz(gzip), lzo, snappy, bzip2
+#
+# * lzo (recommended with caveats): moderate compression, low cpu burden;
+#   typically leads to a noticeable speedup. Best default choice, but you must
+#   set up LZO independently due to license incompatibility
+# * snappy: moderate compression, low cpu burden; typically leads to a noticeable speedup.
+# * gz (default): higher compression, high CPU burden. Typically leads to a noticeable slowdown.
+# * bzip2: most compression, major CPU burden. Typically leads to a noticeable slowdown.
+#
+# pig.tmpfilecompression.codec=gzip
+
+#
+# === Split Combining
+#
+
+#
+# Should pig try to combine small files for fewer map tasks? This improves the
+# efficiency of jobs with many small input files, reduces the overhead on the
+# jobtracker, and reduces the number of output files a map-only job
+# produces. However, it only works with certain loaders and increases non-local
+# map tasks. See http://pig.apache.org/docs/r0.12.0/perf.html#combine-files
+#
+# * false (default, recommended): _do_ combine files
+# * true: do not combine files
+#
+# pig.noSplitCombination=false
+
+#
+# Size, in bytes, of data to be processed by a single map. Smaller files are
+# combined until this size is reached. If unset, defaults to the file system's
+# default block size.
+#
+# pig.maxCombinedSplitSize=
+
+# ###########################################################################
+#
+# Execution options
+#
+
+# Should pig omit combiners? (default, recommended: false -- meaning pig _will_
+# use combiners)
+#
+# When combiners work well, they eliminate a significant amount of
+# data. However, if they do not eliminate much data -- say, a DISTINCT operation
+# that only eliminates 5% of the records -- they add a noticeable overhead to
+# the job. So the recommended default is false (use combiners), selectively
+# disabling them per-job:
+#
+#     pig -Dpig.exec.nocombiner=true distinct_but_not_too_much.pig
+#
+# pig.exec.nocombiner=false
+
+# EXPERIMENTAL: Aggregate records in map task before sending to the combiner?
+# (default: false, 10; recommended: true, 10). In cases where there is a massive
+# reduction of data in the aggregation step, pig can do a first pass of
+# aggregation before the data even leaves the mapper, saving much serialization
+# overhead. It's off by default but can give a major improvement to
+# group-and-aggregate operations. Pig skips partial aggregation unless reduction
+# is better than a factor of minReduction (default: 10). See
+# http://pig.apache.org/docs/r0.12.0/perf.html#hash-based-aggregation
+#
+# pig.exec.mapPartAgg=false
+# pig.exec.mapPartAgg.minReduction=10
+
+#
+# === Control how many reducers are used.
+#
+
+# Estimate number of reducers naively using a fixed amount of data per
+# reducer. Optimally, you have both fewer reducers than available reduce slots,
+# and reducers that are neither getting too little data (less than a half-GB or
+# so) nor too much data (more than 2-3 times the reducer child process max heap
+# size). The default of 1000000000 (about 1GB) is probably low for a production
+# cluster -- however it's much worse to set this too high (reducers spill many
+# times over in group-sort) than too low (delay waiting for reduce slots).
+#
+# pig.exec.reducers.bytes.per.reducer=1000000000
+
+#
+# Don't ever use more than this many reducers. (default: 999)
+#
+# pig.exec.reducers.max=999
+
+#
+# === Local mode for small jobs
+#
+
+# EXPERIMENTAL: Use local mode for small jobs? If true, jobs with input data
+# size smaller than pig.auto.local.input.maxbytes bytes and one or no reducers
+# are run in local mode, which is much faster. Note that file paths are still
+# interpreted as pig.exectype implies.
+#
+# * true (recommended): allow local mode for small jobs, which is much faster.
+# * false (default): always use pig.exectype.
+#
+# pig.auto.local.enabled=false
+
+#
+# Definition of a small job for the pig.auto.local.enabled feature. Only jobs
+# with less than this many bytes of input are candidates to run locally (default:
+# 100000000 bytes, about 100MB)
+#
+# pig.auto.local.input.maxbytes=100000000
+
+############################################################################
+#
+# Security Features
+#
+
+# Comma-delimited list of commands/operators that are disallowed. This security
+# feature can be used by administrators to block use of certain commands by
+# users.
+#
+# * &lt;blank&gt; (default): all commands and operators are allowed.
+# * fs,set (for example): block all filesystem commands and config changes from pig scripts.
+#
+# pig.blacklist=
+# pig.blacklist=fs,set
+
+# Comma-delimited list of the only commands/operators that are allowed. This
+# security feature can be used by administrators to block use of certain
+# commands by users.
+#
+# * &lt;blank&gt; (default): all commands and operators not on the pig.blacklist are allowed.
+# * load,store,filter,group: only LOAD, STORE, FILTER, GROUP
+#   from pig scripts. All other commands and operators will fail.
+#
+# pig.whitelist=
+# pig.whitelist=load,store,filter,group
+
+#####################################################################
+#
+# Advanced Site-specific Customizations
+#
+
+# Remove intermediate output files?
+#
+# * true (default, recommended): remove the files
+# * false: do NOT remove the files. You must clean them up yourself.
+#
+# Keeping them is useful for advanced debugging, but can be dangerous -- you
+# must clean them up yourself.  Inspect the intermediate outputs with
+#
+#     LOAD '/path/to/tmp/file' USING org.apache.pig.impl.io.TFileStorage();
+#
+# (Or ...SequenceFileInterStorage if pig.tmpfilecompression.storage is seqfile)
+#
+# pig.delete.temp.files=true
+
+# EXPERIMENTAL: A Pig Progress Notification Listener (PPNL) lets you wire pig's
+# progress into your visibility stack. To use a PPNL, supply the fully qualified
+# class name of a PPNL implementation. Note that only one PPNL can be set up, so
+# if you need several, write a PPNL that will chain them.
+#
+# See https://github.com/twitter/ambrose for a pretty awesome one of these
+#
+# pig.notification.listener=&lt;fully qualified class name of a PPNL implementation&gt;
+
+# String argument to pass to your PPNL constructor (optional). Only a single
+# string value is allowed. (default none)
+#
+# pig.notification.listener.arg=&lt;somevalue&gt;
+
+# EXPERIMENTAL: Class invoked to estimate the number of reducers to use.
+# (default: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator)
+#
+# If you don't know how or why to write a PigReducerEstimator, you're unlikely
+# to use this. By default, the naive mapReduceLayer.InputSizeReducerEstimator is
+# used, but you can specify anything implementing the interface
+# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigReducerEstimator
+#
+# pig.exec.reducer.estimator=&lt;fully qualified class name of a PigReducerEstimator implementation&gt;
+
+# Optional String argument to pass to your PigReducerEstimator. (default: none;
+# a single String argument is allowed).
+#
+# pig.exec.reducer.estimator.arg=&lt;somevalue&gt;
+
+# Class invoked to report the size of reducers output. By default, the reducers'
+# output is computed as the total size of output files. But not every storage is
+# file-based, and so this logic can be replaced by implementing the interface
+# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigStatsOutputSizeReader
+# If you need to register more than one reader, you can register them as a comma
+# separated list. Every reader implements a boolean supports(POStore sto) method.
+# When there are more than one reader, they are consulted in order, and the
+# first one whose supports() method returns true will be used.
+#
+# pig.stats.output.size.reader=&lt;fully qualified class name of a PigStatsOutputSizeReader implementation&gt;
+# pig.stats.output.size.reader.unsupported=&lt;comma separated list of StoreFuncs that are not supported by this reader&gt;
+
+# By default, Pig retrieves TaskReports for every launched task to compute
+# various job statistics. But this can cause OOM if the number of tasks is
+# large. In such case, you can disable it by setting this property to true.
+# pig.stats.notaskreport=false
+
+#
+# Override hadoop configs programmatically
+#
+# By default, Pig expects hadoop configs (hadoop-site.xml and core-site.xml)
+# to be present on the classpath. There are cases when these configs need
+# to be passed programmatically, such as when using the PigServer API.
+# In such cases, you can override hadoop configs by setting the property
+# "pig.use.overriden.hadoop.configs".
+#
+# When this property is set to true, Pig ignores looking for hadoop configs
+# in the classpath and instead picks it up from Properties/Configuration
+# object passed to it.
+#
+# pig.use.overriden.hadoop.configs=false
+
+# Implied LoadFunc for the LOAD operation when no USING clause is
+# present. Supply the fully qualified class name of a LoadFunc
+# implementation. Note: setting this means you will have to modify most code
+# brought in from elsewhere on the web, as people generally omit the USING
+# clause for TSV files.
+#
+# * org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values LoadFunc
+# * my.custom.udfcollection.MyCustomLoadFunc (for example): use MyCustomLoadFunc instead
+#
+# pig.default.load.func=&lt;fully qualified class name of a LoadFunc implementation&gt;
+
+# The implied StoreFunc for STORE operations with no USING clause. Supply the
+# fully qualified class name of a StoreFunc implementation.
+#
+# * org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values StoreFunc.
+# * my.custom.udfcollection.MyCustomStoreFunc (for example): use MyCustomStoreFunc instead
+#
+# pig.default.store.func=&lt;fully qualified class name of a StoreFunc implementation&gt;
+
+# Recover jobs when the application master is restarted? (default: false). This
+# is a Hadoop 2 specific property; enable it to take advantage of AM recovery.
+#
+# pig.output.committer.recovery.support=true
+
+# Should scripts check to prevent multiple stores writing to the same location?
+# (default: false) When set to true, execution of the script stops right away.
+#
+pig.location.check.strict=false
+
+# In addition to the fs-style commands (rm, ls, etc) Pig can now execute
+# SQL-style DDL commands, eg "sql create table pig_test(name string, age int)".
+# The only implemented backend is hcat, and luckily that's also the default.
+#
+# pig.sql.type=hcat
+
+# Path to the hcat executable, for use with pig.sql.type=hcat (default: null)
+#
+hcat.bin=/usr/local/hcat/bin/hcat
+
+###########################################################################
+#
+# Overrides for extreme environments
+#
+# (Most people won't have to adjust these parameters)
+#
+
+
+# Limit the pig script length placed in the jobconf xml. (default: 10240)
+# Extremely long queries can waste space in the JobConf; since its contents are
+# only advisory, the default is fine unless you are retaining it for forensics.
+#
+# pig.script.max.size=10240
+
+# Disable use of counters by Pig. Note that the word 'counter' is singular here.
+#
+# * false (default, recommended): do NOT disable counters.
+# * true: disable counters. Set this to true only when your Pig job would
+#   otherwise die because it uses more counters than the configured hadoop limit.
+#
+# pig.disable.counter=true
+
+# Sample size (per-mapper, in number of rows) the ORDER..BY operation's
+# RandomSampleLoader uses to estimate how your data should be
+# partitioned. (default, recommended: 100 rows per task) Increase this if you
+# have exceptionally large input splits and are unhappy with the reducer skew.
+#
+# pig.random.sampler.sample.size=100
+
+# Process an entire script at once, reducing the amount of work and number of
+# tasks? (default, recommended: true) See http://pig.apache.org/docs/r0.12.0/perf.html#multi-query-execution
+#
+# MultiQuery optimization is very useful, and so the recommended default is
+# true. You may find that a script fails to compile under MultiQuery. If so,
+# disable it at runtime:
+#
+#     pig -no_multiquery script_that_makes_pig_sad.pig
+#
+# opt.multiquery=true
+
+# For small queries, fetch data directly from HDFS. (default, recommended:
+# true). If you want to force Pig to launch an MR job, for example when you're
+# testing a live cluster, disable with the -N option. See PIG-3642.
+#
+# opt.fetch=true
+
+###########################################################################
+#
+# Streaming properties
+#
+
+# Define which properties will be set in the streaming environment. Set this
+# property to a comma-delimited list of property names, and those properties
+# will be set in the environment.
+#
+# pig.streaming.environment=<comma-delimited list of properties>
+
+# Specify a comma-delimited list of local files to ship to the distributed
+# cache for streaming jobs.
+#
+# pig.streaming.ship.files=<comma-delimited list of local files>
+
+# Specify a comma-delimited list of remote files to place in the distributed
+# cache for streaming jobs.
+#
+# pig.streaming.cache.files=<comma-delimited list of remote files>
+
+# Specify the python command to be used for python streaming udfs. By default,
+# python is used, but you can override it with a non-default version such as
+# python2.7.
+#
+# pig.streaming.udf.python.command=python
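+#
+# For example (hypothetical path), to ship a local UDF script and run python
+# streaming udfs under python2.7:
+#
+# pig.streaming.ship.files=/tmp/my_streaming_udf.py
+# pig.streaming.udf.python.command=python2.7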
+
+    </value>
+    <description>Describe all the Pig agent configurations</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/kerberos.json
new file mode 100755
index 0000000..22dd6cb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/kerberos.json
@@ -0,0 +1,17 @@
+{
+  "services": [
+    {
+      "name": "PIG",
+      "components": [
+        {
+          "name": "PIG",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/metainfo.xml
new file mode 100755
index 0000000..85318de
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/metainfo.xml
@@ -0,0 +1,86 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <displayName>Pig</displayName>
+      <comment>Scripting platform for analyzing large datasets</comment>
+      <version>0.15.0</version>
+      <components>
+        <component>
+          <name>PIG</name>
+          <displayName>Pig</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/pig_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>pig-env.sh</fileName>
+              <dictionaryName>pig-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>pig-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>pig.properties</fileName>
+              <dictionaryName>pig-properties</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>pig-env</config-type>
+        <config-type>pig-log4j</config-type>
+        <config-type>pig-properties</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/files/pigSmoke.sh
new file mode 100755
index 0000000..a22456e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+/*Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License */
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params.py
new file mode 100755
index 0000000..89ab726
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params.py
@@ -0,0 +1,25 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management.libraries.functions.default import default
+
+from params_linux import *
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
\ No newline at end of file


[09/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_22.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_22.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_22.py
new file mode 100755
index 0000000..6848635
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_22.py
@@ -0,0 +1,1713 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import math
+from math import floor
+from urlparse import urlparse
+import os
+import fnmatch
+import socket
+import re
+import xml.etree.ElementTree as ET
+
+from resource_management.core.logger import Logger
+
+try:
+  from stack_advisor_21 import *
+except ImportError:
+  # Ignore ImportError
+  print("stack_advisor_21 not found")
+
+class HDP22StackAdvisor(HDP21StackAdvisor):
+
+  def getServiceConfigurationRecommenderDict(self):
+    parentRecommendConfDict = super(HDP22StackAdvisor, self).getServiceConfigurationRecommenderDict()
+    childRecommendConfDict = {
+      "HDFS": self.recommendHDFSConfigurations,
+      "HIVE": self.recommendHIVEConfigurations,
+      "HBASE": self.recommendHBASEConfigurations,
+      "MAPREDUCE2": self.recommendMapReduce2Configurations,
+      "TEZ": self.recommendTezConfigurations,
+      "AMBARI_METRICS": self.recommendAmsConfigurations,
+      "YARN": self.recommendYARNConfigurations,
+      "STORM": self.recommendStormConfigurations,
+      "KNOX": self.recommendKnoxConfigurations,
+      "RANGER": self.recommendRangerConfigurations,
+      "LOGSEARCH" : self.recommendLogsearchConfigurations,
+      "SPARK": self.recommendSparkConfigurations,
+    }
+    parentRecommendConfDict.update(childRecommendConfDict)
+    return parentRecommendConfDict
+
+
+  def recommendSparkConfigurations(self, configurations, clusterData, services, hosts):
+    """
+    :type configurations dict
+    :type clusterData dict
+    :type services dict
+    :type hosts dict
+    """
+    putSparkProperty = self.putProperty(configurations, "spark-defaults", services)
+
+    spark_queue = self.recommendYarnQueue(services, "spark-defaults", "spark.yarn.queue")
+    if spark_queue is not None:
+      putSparkProperty("spark.yarn.queue", spark_queue)
+
+    # add only if spark supports this config
+    if "configurations" in services and "spark-thrift-sparkconf" in services["configurations"]:
+      putSparkThriftSparkConf = self.putProperty(configurations, "spark-thrift-sparkconf", services)
+      recommended_spark_queue = self.recommendYarnQueue(services, "spark-thrift-sparkconf", "spark.yarn.queue")
+      if recommended_spark_queue is not None:
+        putSparkThriftSparkConf("spark.yarn.queue", recommended_spark_queue)
+
+
+  def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP22StackAdvisor, self).recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    putYarnProperty = self.putProperty(configurations, "yarn-site", services)
+    putYarnProperty('yarn.nodemanager.resource.cpu-vcores', clusterData['cpu'])
+    putYarnProperty('yarn.scheduler.minimum-allocation-vcores', 1)
+    putYarnProperty('yarn.scheduler.maximum-allocation-vcores', configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.cpu-vcores"])
+    # Property Attributes
+    putYarnPropertyAttribute = self.putPropertyAttribute(configurations, "yarn-site")
+    nodeManagerHost = self.getHostWithComponent("YARN", "NODEMANAGER", services, hosts)
+    if (nodeManagerHost is not None):
+      cpuPercentageLimit = 0.8
+      if "yarn.nodemanager.resource.percentage-physical-cpu-limit" in configurations["yarn-site"]["properties"]:
+        cpuPercentageLimit = float(configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.percentage-physical-cpu-limit"])
+      cpuLimit = max(1, int(floor(nodeManagerHost["Hosts"]["cpu_count"] * cpuPercentageLimit)))
+      putYarnProperty('yarn.nodemanager.resource.cpu-vcores', str(cpuLimit))
+      putYarnProperty('yarn.scheduler.maximum-allocation-vcores', configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.cpu-vcores"])
+      putYarnPropertyAttribute('yarn.nodemanager.resource.memory-mb', 'maximum', int(nodeManagerHost["Hosts"]["total_mem"] / 1024)) # total_mem in kb
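+      # The UI maximum for vcores allows up to 2x the physical core count (oversubscription).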
+      putYarnPropertyAttribute('yarn.nodemanager.resource.cpu-vcores', 'maximum', nodeManagerHost["Hosts"]["cpu_count"] * 2)
+      putYarnPropertyAttribute('yarn.scheduler.minimum-allocation-vcores', 'maximum', configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.cpu-vcores"])
+      putYarnPropertyAttribute('yarn.scheduler.maximum-allocation-vcores', 'maximum', configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.cpu-vcores"])
+      putYarnPropertyAttribute('yarn.scheduler.minimum-allocation-mb', 'maximum', configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"])
+      putYarnPropertyAttribute('yarn.scheduler.maximum-allocation-mb', 'maximum', configurations["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"])
+
+      # Above is the default calculated 'maximum' values derived purely from hosts.
+      # However, there are 'maximum' and other attributes that actually change based on the values
+      #  of other configs. We need to update those values.
+      if ("yarn-site" in services["configurations"]):
+        if ("yarn.nodemanager.resource.memory-mb" in services["configurations"]["yarn-site"]["properties"]):
+          putYarnPropertyAttribute('yarn.scheduler.maximum-allocation-mb', 'maximum', services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"])
+          putYarnPropertyAttribute('yarn.scheduler.minimum-allocation-mb', 'maximum', services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.resource.memory-mb"])
+        if ("yarn.nodemanager.resource.cpu-vcores" in services["configurations"]["yarn-site"]["properties"]):
+          putYarnPropertyAttribute('yarn.scheduler.maximum-allocation-vcores', 'maximum', services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.resource.cpu-vcores"])
+          putYarnPropertyAttribute('yarn.scheduler.minimum-allocation-vcores', 'maximum', services["configurations"]["yarn-site"]["properties"]["yarn.nodemanager.resource.cpu-vcores"])
+
+      kerberos_authentication_enabled = self.isSecurityEnabled(services)
+      if kerberos_authentication_enabled:
+        putYarnProperty('yarn.nodemanager.container-executor.class',
+                        'org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor')
+
+      if "yarn-env" in services["configurations"] and "yarn_cgroups_enabled" in services["configurations"]["yarn-env"]["properties"]:
+        yarn_cgroups_enabled = services["configurations"]["yarn-env"]["properties"]["yarn_cgroups_enabled"].lower() == "true"
+        if yarn_cgroups_enabled:
+          putYarnProperty('yarn.nodemanager.container-executor.class', 'org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor')
+          putYarnProperty('yarn.nodemanager.linux-container-executor.group', 'hadoop')
+          putYarnProperty('yarn.nodemanager.linux-container-executor.resources-handler.class', 'org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler')
+          putYarnProperty('yarn.nodemanager.linux-container-executor.cgroups.hierarchy', '/yarn')
+          putYarnProperty('yarn.nodemanager.linux-container-executor.cgroups.mount', 'true')
+          putYarnProperty('yarn.nodemanager.linux-container-executor.cgroups.mount-path', '/cgroup')
+        else:
+          if not kerberos_authentication_enabled:
+            putYarnProperty('yarn.nodemanager.container-executor.class', 'org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor')
+          putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.resources-handler.class', 'delete', 'true')
+          putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.hierarchy', 'delete', 'true')
+          putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount', 'delete', 'true')
+          putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount-path', 'delete', 'true')
+    # recommend hadoop.registry.rm.enabled based on SLIDER in services
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if "SLIDER" in servicesList:
+      putYarnProperty('hadoop.registry.rm.enabled', 'true')
+    else:
+      putYarnProperty('hadoop.registry.rm.enabled', 'false')
+
+  def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP22StackAdvisor, self).recommendHDFSConfigurations(configurations, clusterData, services, hosts)
+    putHdfsSiteProperty = self.putProperty(configurations, "hdfs-site", services)
+    putHdfsSitePropertyAttribute = self.putPropertyAttribute(configurations, "hdfs-site")
+    putHdfsSiteProperty("dfs.datanode.max.transfer.threads", 16384 if clusterData["hBaseInstalled"] else 4096)
+
+    dataDirsCount = 1
+    # Use users 'dfs.datanode.data.dir' first
+    if "hdfs-site" in services["configurations"] and "dfs.datanode.data.dir" in services["configurations"]["hdfs-site"]["properties"]:
+      dataDirsCount = len(str(services["configurations"]["hdfs-site"]["properties"]["dfs.datanode.data.dir"]).split(","))
+    elif "dfs.datanode.data.dir" in configurations["hdfs-site"]["properties"]:
+      dataDirsCount = len(str(configurations["hdfs-site"]["properties"]["dfs.datanode.data.dir"]).split(","))
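+    # Tolerance tiers: <= 2 data dirs -> tolerate 0 failed volumes, 3-4 -> 1, 5 or more -> 2.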
+    if dataDirsCount <= 2:
+      failedVolumesTolerated = 0
+    elif dataDirsCount <= 4:
+      failedVolumesTolerated = 1
+    else:
+      failedVolumesTolerated = 2
+    putHdfsSiteProperty("dfs.datanode.failed.volumes.tolerated", failedVolumesTolerated)
+
+    namenodeHosts = self.getHostsWithComponent("HDFS", "NAMENODE", services, hosts)
+
+    # 25 * # of cores on NameNode
+    nameNodeCores = 4
+    if namenodeHosts is not None and len(namenodeHosts):
+      nameNodeCores = int(namenodeHosts[0]['Hosts']['cpu_count'])
+    putHdfsSiteProperty("dfs.namenode.handler.count", 25 * nameNodeCores)
+    if 25 * nameNodeCores > 200:
+      putHdfsSitePropertyAttribute("dfs.namenode.handler.count", "maximum", 25 * nameNodeCores)
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ('ranger-hdfs-plugin-properties' in services['configurations']) and ('ranger-hdfs-plugin-enabled' in services['configurations']['ranger-hdfs-plugin-properties']['properties']):
+      rangerPluginEnabled = services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled']
+      if ("RANGER" in servicesList) and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+        putHdfsSiteProperty("dfs.permissions.enabled",'true')
+
+    putHdfsSiteProperty("dfs.namenode.safemode.threshold-pct", "0.999" if len(namenodeHosts) > 1 else "1.000")
+
+    putHdfsEnvProperty = self.putProperty(configurations, "hadoop-env", services)
+    putHdfsEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hadoop-env")
+
+    putHdfsEnvProperty('namenode_heapsize', max(int(clusterData['totalAvailableRam'] / 2), 1024))
+
+    nn_heapsize_limit = None
+    if (namenodeHosts is not None and len(namenodeHosts) > 0):
+      if len(namenodeHosts) > 1:
+        nn_max_heapsize = min(int(namenodeHosts[0]["Hosts"]["total_mem"]), int(namenodeHosts[1]["Hosts"]["total_mem"])) / 1024
+        masters_at_host = max(self.getHostComponentsByCategories(namenodeHosts[0]["Hosts"]["host_name"], ["MASTER"], services, hosts),
+                              self.getHostComponentsByCategories(namenodeHosts[1]["Hosts"]["host_name"], ["MASTER"], services, hosts))
+      else:
+        nn_max_heapsize = int(namenodeHosts[0]["Hosts"]["total_mem"] / 1024) # total_mem in kb
+        masters_at_host = self.getHostComponentsByCategories(namenodeHosts[0]["Hosts"]["host_name"], ["MASTER"], services, hosts)
+
+      putHdfsEnvPropertyAttribute('namenode_heapsize', 'maximum', max(nn_max_heapsize, 1024))
+
+      nn_heapsize_limit = nn_max_heapsize
+      nn_heapsize_limit -= clusterData["reservedRam"]
+      if len(masters_at_host) > 1:
+        nn_heapsize_limit = int(nn_heapsize_limit/2)
+
+      putHdfsEnvProperty('namenode_heapsize', max(nn_heapsize_limit, 1024))
+
+
+    datanodeHosts = self.getHostsWithComponent("HDFS", "DATANODE", services, hosts)
+    if datanodeHosts is not None and len(datanodeHosts) > 0:
+      min_datanode_ram_kb = 1073741824 # 1 TB
+      for datanode in datanodeHosts:
+        ram_kb = datanode['Hosts']['total_mem']
+        min_datanode_ram_kb = min(min_datanode_ram_kb, ram_kb)
+
+      datanodeFilesM = len(datanodeHosts)*dataDirsCount/10 # in millions; assumes ~100,000 files per disk, so disks/10 gives millions of files
+      nn_memory_configs = [
+        {'nn_heap':1024,  'nn_opt':128},
+        {'nn_heap':3072,  'nn_opt':512},
+        {'nn_heap':5376,  'nn_opt':768},
+        {'nn_heap':9984,  'nn_opt':1280},
+        {'nn_heap':14848, 'nn_opt':2048},
+        {'nn_heap':19456, 'nn_opt':2560},
+        {'nn_heap':24320, 'nn_opt':3072},
+        {'nn_heap':33536, 'nn_opt':4352},
+        {'nn_heap':47872, 'nn_opt':6144},
+        {'nn_heap':59648, 'nn_opt':7680},
+        {'nn_heap':71424, 'nn_opt':8960},
+        {'nn_heap':94976, 'nn_opt':8960}
+      ]
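+      # Exactly one of the mutually exclusive range predicates below is True;
+      # indexing the dict with 1 (== True) selects the matching memory tier.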
+      index = {
+        datanodeFilesM < 1 : 0,
+        1 <= datanodeFilesM < 5 : 1,
+        5 <= datanodeFilesM < 10 : 2,
+        10 <= datanodeFilesM < 20 : 3,
+        20 <= datanodeFilesM < 30 : 4,
+        30 <= datanodeFilesM < 40 : 5,
+        40 <= datanodeFilesM < 50 : 6,
+        50 <= datanodeFilesM < 70 : 7,
+        70 <= datanodeFilesM < 100 : 8,
+        100 <= datanodeFilesM < 125 : 9,
+        125 <= datanodeFilesM < 150 : 10,
+        150 <= datanodeFilesM : 11
+      }[1]
+
+      nn_memory_config = nn_memory_configs[index]
+
+      #override with new values if applicable
+      if nn_heapsize_limit is not None and nn_memory_config['nn_heap'] <= nn_heapsize_limit:
+        putHdfsEnvProperty('namenode_heapsize', nn_memory_config['nn_heap'])
+
+      putHdfsEnvPropertyAttribute('dtnode_heapsize', 'maximum', int(min_datanode_ram_kb/1024))
+
+    nn_heapsize = int(configurations["hadoop-env"]["properties"]["namenode_heapsize"])
+    putHdfsEnvProperty('namenode_opt_newsize', max(int(nn_heapsize / 8), 128))
+    putHdfsEnvProperty('namenode_opt_maxnewsize', max(int(nn_heapsize / 8), 128))
+
+    putHdfsSitePropertyAttribute = self.putPropertyAttribute(configurations, "hdfs-site")
+    putHdfsSitePropertyAttribute('dfs.datanode.failed.volumes.tolerated', 'maximum', dataDirsCount)
+
+    keyserverHostsString = None
+    keyserverPortString = None
+    if "hadoop-env" in services["configurations"] and "keyserver_host" in services["configurations"]["hadoop-env"]["properties"] and "keyserver_port" in services["configurations"]["hadoop-env"]["properties"]:
+      keyserverHostsString = services["configurations"]["hadoop-env"]["properties"]["keyserver_host"]
+      keyserverPortString = services["configurations"]["hadoop-env"]["properties"]["keyserver_port"]
+
+    # Irrespective of what hadoop-env has, if Ranger-KMS is installed, we use its values.
+    rangerKMSServerHosts = self.getHostsWithComponent("RANGER_KMS", "RANGER_KMS_SERVER", services, hosts)
+    if rangerKMSServerHosts is not None and len(rangerKMSServerHosts) > 0:
+      rangerKMSServerHostsArray = []
+      for rangeKMSServerHost in rangerKMSServerHosts:
+        rangerKMSServerHostsArray.append(rangeKMSServerHost["Hosts"]["host_name"])
+      keyserverHostsString = ";".join(rangerKMSServerHostsArray)
+      if "kms-env" in services["configurations"] and "kms_port" in services["configurations"]["kms-env"]["properties"]:
+        keyserverPortString = services["configurations"]["kms-env"]["properties"]["kms_port"]
+
+    if keyserverHostsString is not None and len(keyserverHostsString.strip()) > 0:
+      urlScheme = "http"
+      if "ranger-kms-site" in services["configurations"] and \
+          "ranger.service.https.attrib.ssl.enabled" in services["configurations"]["ranger-kms-site"]["properties"] and \
+          services["configurations"]["ranger-kms-site"]["properties"]["ranger.service.https.attrib.ssl.enabled"].lower() == "true":
+        urlScheme = "https"
+
+      if keyserverPortString is None or len(keyserverPortString.strip()) < 1:
+        keyserverPortString = ":9292"
+      else:
+        keyserverPortString = ":" + keyserverPortString.strip()
+      putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
+      kmsPath = "kms://" + urlScheme + "@" + keyserverHostsString.strip() + keyserverPortString + "/kms"
+      putCoreSiteProperty("hadoop.security.key.provider.path", kmsPath)
+      putHdfsSiteProperty("dfs.encryption.key.provider.uri", kmsPath)
+
+    if "ranger-env" in services["configurations"] and "ranger-hdfs-plugin-properties" in services["configurations"] and \
+      "ranger-hdfs-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putHdfsRangerPluginProperty = self.putProperty(configurations, "ranger-hdfs-plugin-properties", services)
+      rangerEnvHdfsPluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-hdfs-plugin-enabled"]
+      putHdfsRangerPluginProperty("ranger-hdfs-plugin-enabled", rangerEnvHdfsPluginProperty)
+
+    putHdfsSitePropertyAttribute = self.putPropertyAttribute(configurations, "hdfs-site")
+    putCoreSitePropertyAttribute = self.putPropertyAttribute(configurations, "core-site")
+    if not "RANGER_KMS" in servicesList:
+      putCoreSitePropertyAttribute('hadoop.security.key.provider.path','delete','true')
+      putHdfsSitePropertyAttribute('dfs.encryption.key.provider.uri','delete','true')
+
+  def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP22StackAdvisor, self).recommendHiveConfigurations(configurations, clusterData, services, hosts)
+
+    putHiveServerProperty = self.putProperty(configurations, "hiveserver2-site", services)
+    putHiveEnvProperty = self.putProperty(configurations, "hive-env", services)
+    putHiveSiteProperty = self.putProperty(configurations, "hive-site", services)
+    putWebhcatSiteProperty = self.putProperty(configurations, "webhcat-site", services)
+    putHiveSitePropertyAttribute = self.putPropertyAttribute(configurations, "hive-site")
+    putHiveEnvPropertyAttributes = self.putPropertyAttribute(configurations, "hive-env")
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    #  Storage
+    putHiveEnvProperty("hive_exec_orc_storage_strategy", "SPEED")
+    putHiveSiteProperty("hive.exec.orc.encoding.strategy", configurations["hive-env"]["properties"]["hive_exec_orc_storage_strategy"])
+    putHiveSiteProperty("hive.exec.orc.compression.strategy", configurations["hive-env"]["properties"]["hive_exec_orc_storage_strategy"])
+
+    putHiveSiteProperty("hive.exec.orc.default.stripe.size", "67108864")
+    putHiveSiteProperty("hive.exec.orc.default.compress", "ZLIB")
+    putHiveSiteProperty("hive.optimize.index.filter", "true")
+    putHiveSiteProperty("hive.optimize.sort.dynamic.partition", "false")
+
+    # Vectorization
+    putHiveSiteProperty("hive.vectorized.execution.enabled", "true")
+    putHiveSiteProperty("hive.vectorized.execution.reduce.enabled", "false")
+
+    # Transactions
+    putHiveEnvProperty("hive_txn_acid", "off")
+    if str(configurations["hive-env"]["properties"]["hive_txn_acid"]).lower() == "on":
+      putHiveSiteProperty("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager")
+      putHiveSiteProperty("hive.support.concurrency", "true")
+      putHiveSiteProperty("hive.compactor.initiator.on", "true")
+      putHiveSiteProperty("hive.compactor.worker.threads", "1")
+      putHiveSiteProperty("hive.enforce.bucketing", "true")
+      putHiveSiteProperty("hive.exec.dynamic.partition.mode", "nonstrict")
+    else:
+      putHiveSiteProperty("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager")
+      putHiveSiteProperty("hive.support.concurrency", "false")
+      putHiveSiteProperty("hive.compactor.initiator.on", "false")
+      putHiveSiteProperty("hive.compactor.worker.threads", "0")
+      putHiveSiteProperty("hive.enforce.bucketing", "false")
+      putHiveSiteProperty("hive.exec.dynamic.partition.mode", "strict")
+
+    hiveMetastoreHost = self.getHostWithComponent("HIVE", "HIVE_METASTORE", services, hosts)
+    if hiveMetastoreHost is not None and len(hiveMetastoreHost) > 0:
+      putHiveSiteProperty("hive.metastore.uris", "thrift://" + hiveMetastoreHost["Hosts"]["host_name"] + ":9083")
+
+    # ATS
+    putHiveEnvProperty("hive_timeline_logging_enabled", "true")
+
+    hooks_properties = ["hive.exec.pre.hooks", "hive.exec.post.hooks", "hive.exec.failure.hooks"]
+    include_ats_hook = str(configurations["hive-env"]["properties"]["hive_timeline_logging_enabled"]).lower() == "true"
+
+    ats_hook_class = "org.apache.hadoop.hive.ql.hooks.ATSHook"
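+    # Append the ATS hook to each hook list when timeline logging is enabled;
+    # strip it when disabled, preserving any other registered hooks.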
+    for hooks_property in hooks_properties:
+      if hooks_property in configurations["hive-site"]["properties"]:
+        hooks_value = configurations["hive-site"]["properties"][hooks_property]
+      else:
+        hooks_value = " "
+      if include_ats_hook and ats_hook_class not in hooks_value:
+        if hooks_value == " ":
+          hooks_value = ats_hook_class
+        else:
+          hooks_value = hooks_value + "," + ats_hook_class
+      if not include_ats_hook and ats_hook_class in hooks_value:
+        hooks_classes = []
+        for hook_class in hooks_value.split(","):
+          if hook_class != ats_hook_class and hook_class != " ":
+            hooks_classes.append(hook_class)
+        if hooks_classes:
+          hooks_value = ",".join(hooks_classes)
+        else:
+          hooks_value = " "
+
+      putHiveSiteProperty(hooks_property, hooks_value)
+
+    # Tez Engine
+    if "TEZ" in servicesList:
+      putHiveSiteProperty("hive.execution.engine", "tez")
+    else:
+      putHiveSiteProperty("hive.execution.engine", "mr")
+
+    container_size = "512"
+
+    if not "yarn-site" in configurations:
+      self.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    #properties below should be always present as they are provided in HDP206 stack advisor at least
+    yarnMaxAllocationSize = min(30 * int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+    #duplicate tez task resource calc logic, direct dependency doesn't look good here (in case of Hive without Tez)
+    container_size = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
+    container_size = min(clusterData['containers'] * clusterData['ramPerContainer'], container_size, yarnMaxAllocationSize)
+
+    if "TEZ" in servicesList:
+        putHiveSiteProperty("hive.tez.container.size", min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]), container_size))
+    
+        putHiveSitePropertyAttribute("hive.tez.container.size", "minimum", int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))
+        putHiveSitePropertyAttribute("hive.tez.container.size", "maximum", int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+
+        if "yarn-site" in services["configurations"]:
+          if "yarn.scheduler.minimum-allocation-mb" in services["configurations"]["yarn-site"]["properties"]:
+            putHiveSitePropertyAttribute("hive.tez.container.size", "minimum", int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))
+          if "yarn.scheduler.maximum-allocation-mb" in services["configurations"]["yarn-site"]["properties"]:
+            putHiveSitePropertyAttribute("hive.tez.container.size", "maximum", int(services["configurations"]["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+    
+        putHiveSiteProperty("hive.prewarm.enabled", "false")
+        putHiveSiteProperty("hive.prewarm.numcontainers", "3")
+        putHiveSiteProperty("hive.tez.auto.reducer.parallelism", "true")
+        putHiveSiteProperty("hive.tez.dynamic.partition.pruning", "true")
+        putHiveSiteProperty("hive.server2.tez.initialize.default.sessions", "false")
+        putHiveSiteProperty("hive.server2.tez.sessions.per.default.queue", "1")
+    
+        container_size = configurations["hive-site"]["properties"]["hive.tez.container.size"]
+        
+    container_size_bytes = int(int(container_size)*0.8*1024*1024) # Xmx == 80% of container
+    # Memory
+    # set noconditionaltask.size only if TEZ is in the service list. This value is too high for non tez case
+    if "TEZ" in servicesList:
+      putHiveSiteProperty("hive.auto.convert.join.noconditionaltask.size", int(round(container_size_bytes/3)))
+      putHiveSitePropertyAttribute("hive.auto.convert.join.noconditionaltask.size", "maximum", container_size_bytes)   
+    putHiveSiteProperty("hive.exec.reducers.bytes.per.reducer", "67108864")
+
+
+    # CBO
+    if "hive-site" in services["configurations"] and "hive.cbo.enable" in services["configurations"]["hive-site"]["properties"]:
+      hive_cbo_enable = services["configurations"]["hive-site"]["properties"]["hive.cbo.enable"]
+      putHiveSiteProperty("hive.stats.fetch.partition.stats", hive_cbo_enable)
+      putHiveSiteProperty("hive.stats.fetch.column.stats", hive_cbo_enable)
+
+    putHiveSiteProperty("hive.compute.query.using.stats", "true")     
+    putHiveSiteProperty("hive.server2.enable.doAs", "true")
+
+    yarn_queues = "default"
+    capacitySchedulerProperties = {}
+    if "capacity-scheduler" in services['configurations']:
+      if "capacity-scheduler" in services['configurations']["capacity-scheduler"]["properties"]:
+        properties = str(services['configurations']["capacity-scheduler"]["properties"]["capacity-scheduler"]).split('\n')
+        for property in properties:
+          key,sep,value = property.partition("=")
+          capacitySchedulerProperties[key] = value
+      if "yarn.scheduler.capacity.root.queues" in capacitySchedulerProperties:
+        yarn_queues = str(capacitySchedulerProperties["yarn.scheduler.capacity.root.queues"])
+      elif "yarn.scheduler.capacity.root.queues" in services['configurations']["capacity-scheduler"]["properties"]:
+        yarn_queues =  services['configurations']["capacity-scheduler"]["properties"]["yarn.scheduler.capacity.root.queues"]
+    # Interactive Queues property attributes
+    putHiveServerPropertyAttribute = self.putPropertyAttribute(configurations, "hiveserver2-site")
+    toProcessQueues = yarn_queues.split(",")
+    leafQueueNames = set() # Remove duplicates
+    while len(toProcessQueues) > 0:
+      queue = toProcessQueues.pop()
+      queueKey = "yarn.scheduler.capacity.root." + queue + ".queues"
+      if queueKey in capacitySchedulerProperties:
+        # This is a parent queue - need to add children
+        subQueues = capacitySchedulerProperties[queueKey].split(",")
+        for subQueue in subQueues:
+          toProcessQueues.append(queue + "." + subQueue)
+      else:
+        # This is a leaf queue
+        queueName = queue.split(".")[-1] # Fully qualified queue name does not work, we should use only leaf name
+        leafQueueNames.add(queueName)
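+    # e.g. (hypothetical) root.queues = "default,dev" with dev.queues = "eng,qa"
+    # yields leaf queue names {"default", "eng", "qa"}.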
+    leafQueues = [{"label": str(queueName) + " queue", "value": queueName} for queueName in leafQueueNames]
+    leafQueues = sorted(leafQueues, key=lambda q:q['value'])
+    putHiveSitePropertyAttribute("hive.server2.tez.default.queues", "entries", leafQueues)
+    putHiveSiteProperty("hive.server2.tez.default.queues", ",".join([leafQueue['value'] for leafQueue in leafQueues]))
+
+    webhcat_queue = self.recommendYarnQueue(services, "webhcat-site", "templeton.hadoop.queue.name")
+    if webhcat_queue is not None:
+      putWebhcatSiteProperty("templeton.hadoop.queue.name", webhcat_queue)
+
+
+    # Recommend Ranger Hive authorization as per Ranger Hive plugin property
+    if "ranger-env" in services["configurations"] and "hive-env" in services["configurations"] and \
+        "ranger-hive-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      rangerEnvHivePluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-hive-plugin-enabled"]
+      if (rangerEnvHivePluginProperty.lower() == "yes"):
+        putHiveEnvProperty("hive_security_authorization", "RANGER")
+
+    # Security
+    if ("configurations" not in services) or ("hive-env" not in services["configurations"]) or \
+              ("properties" not in services["configurations"]["hive-env"]) or \
+              ("hive_security_authorization" not in services["configurations"]["hive-env"]["properties"]) or \
+              str(services["configurations"]["hive-env"]["properties"]["hive_security_authorization"]).lower() == "none":
+      putHiveEnvProperty("hive_security_authorization", "None")
+    else:
+      putHiveEnvProperty("hive_security_authorization", services["configurations"]["hive-env"]["properties"]["hive_security_authorization"])
+
+
+    # Recommend Ranger Hive authorization as per Ranger Hive plugin property
+    if "ranger-env" in services["configurations"] and "hive-env" in services["configurations"] and \
+        "ranger-hive-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      rangerEnvHivePluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-hive-plugin-enabled"]
+      rangerEnvHiveAuthProperty = services["configurations"]["hive-env"]["properties"]["hive_security_authorization"]
+      if (rangerEnvHivePluginProperty.lower() == "yes"):
+        putHiveEnvProperty("hive_security_authorization", "Ranger")
+      elif (rangerEnvHiveAuthProperty.lower() == "ranger"):
+        putHiveEnvProperty("hive_security_authorization", "None")
+
+    # hive_security_authorization == 'none'
+    # this property is unrelated to Kerberos
+    if str(configurations["hive-env"]["properties"]["hive_security_authorization"]).lower() == "none":
+      putHiveSiteProperty("hive.security.authorization.manager", "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory")
+      if ("hive.security.authorization.manager" in configurations["hiveserver2-site"]["properties"]) or \
+              ("hiveserver2-site" not in services["configurations"]) or \
+              ("hiveserver2-site" in services["configurations"] and "hive.security.authorization.manager" in services["configurations"]["hiveserver2-site"]["properties"]):
+        putHiveServerPropertyAttribute("hive.security.authorization.manager", "delete", "true")
+      if ("hive.security.authenticator.manager" in configurations["hiveserver2-site"]["properties"]) or \
+              ("hiveserver2-site" not in services["configurations"]) or \
+              ("hiveserver2-site" in services["configurations"] and "hive.security.authenticator.manager" in services["configurations"]["hiveserver2-site"]["properties"]):
+        putHiveServerPropertyAttribute("hive.security.authenticator.manager", "delete", "true")
+      if ("hive.conf.restricted.list" in configurations["hiveserver2-site"]["properties"]) or \
+              ("hiveserver2-site" not in services["configurations"]) or \
+              ("hiveserver2-site" in services["configurations"] and "hive.conf.restricted.list" in services["configurations"]["hiveserver2-site"]["properties"]):
+        putHiveServerPropertyAttribute("hive.conf.restricted.list", "delete", "true")
+      if "KERBEROS" not in servicesList: # Kerberos security depends on this property
+        putHiveSiteProperty("hive.security.authorization.enabled", "false")
+    else:
+      putHiveSiteProperty("hive.security.authorization.enabled", "true")
+
+    try:
+      auth_manager_value = str(configurations["hive-env"]["properties"]["hive.security.metastore.authorization.manager"])
+    except KeyError:
+      auth_manager_value = 'org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider'
+      pass
+    auth_manager_values = auth_manager_value.split(",")
+    sqlstdauth_class = "org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly"
+
+    putHiveSiteProperty("hive.server2.enable.doAs", "true")
+
+    # hive_security_authorization == 'sqlstdauth'
+    if str(configurations["hive-env"]["properties"]["hive_security_authorization"]).lower() == "sqlstdauth":
+      putHiveSiteProperty("hive.server2.enable.doAs", "false")
+      putHiveServerProperty("hive.security.authorization.enabled", "true")
+      putHiveServerProperty("hive.security.authorization.manager", "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory")
+      putHiveServerProperty("hive.security.authenticator.manager", "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator")
+      putHiveServerProperty("hive.conf.restricted.list", "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role")
+      putHiveSiteProperty("hive.security.authorization.manager", "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory")
+      if sqlstdauth_class not in auth_manager_values:
+        auth_manager_values.append(sqlstdauth_class)
+    elif sqlstdauth_class in auth_manager_values:
+      #remove item from csv
+      auth_manager_values = [x for x in auth_manager_values if x != sqlstdauth_class]
+      pass
+    putHiveSiteProperty("hive.security.metastore.authorization.manager", ",".join(auth_manager_values))
+
+    # hive_security_authorization == 'ranger'
+    if str(configurations["hive-env"]["properties"]["hive_security_authorization"]).lower() == "ranger":
+      putHiveSiteProperty("hive.server2.enable.doAs", "false")
+      putHiveServerProperty("hive.security.authorization.enabled", "true")
+      putHiveServerProperty("hive.security.authorization.manager", "com.xasecure.authorization.hive.authorizer.XaSecureHiveAuthorizerFactory")
+      putHiveServerProperty("hive.security.authenticator.manager", "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator")
+      putHiveServerProperty("hive.conf.restricted.list", "hive.security.authorization.enabled,hive.security.authorization.manager,hive.security.authenticator.manager")
+
+    putHiveSiteProperty("hive.server2.use.SSL", "false")
+
+    #Hive authentication
+    hive_server2_auth = None
+    if "hive-site" in services["configurations"] and "hive.server2.authentication" in services["configurations"]["hive-site"]["properties"]:
+      hive_server2_auth = str(services["configurations"]["hive-site"]["properties"]["hive.server2.authentication"]).lower()
+    elif "hive.server2.authentication" in configurations["hive-site"]["properties"]:
+      hive_server2_auth = str(configurations["hive-site"]["properties"]["hive.server2.authentication"]).lower()
+
+    if hive_server2_auth == "ldap":
+      putHiveSiteProperty("hive.server2.authentication.ldap.url", "")
+    else:
+      if ("hive.server2.authentication.ldap.url" in configurations["hive-site"]["properties"]) or \
+              ("hive-site" not in services["configurations"]) or \
+              ("hive-site" in services["configurations"] and "hive.server2.authentication.ldap.url" in services["configurations"]["hive-site"]["properties"]):
+        putHiveSitePropertyAttribute("hive.server2.authentication.ldap.url", "delete", "true")
+
+    if hive_server2_auth == "kerberos":
+      if "hive-site" in services["configurations"] and "hive.server2.authentication.kerberos.keytab" not in services["configurations"]["hive-site"]["properties"]:
+        putHiveSiteProperty("hive.server2.authentication.kerberos.keytab", "")
+      if "hive-site" in services["configurations"] and "hive.server2.authentication.kerberos.principal" not in services["configurations"]["hive-site"]["properties"]:
+        putHiveSiteProperty("hive.server2.authentication.kerberos.principal", "")
+    elif "KERBEROS" not in servicesList: # Since 'hive_server2_auth' cannot be relied on within the default, empty recommendations request
+      if ("hive.server2.authentication.kerberos.keytab" in configurations["hive-site"]["properties"]) or \
+              ("hive-site" not in services["configurations"]) or \
+              ("hive-site" in services["configurations"] and "hive.server2.authentication.kerberos.keytab" in services["configurations"]["hive-site"]["properties"]):
+        putHiveSitePropertyAttribute("hive.server2.authentication.kerberos.keytab", "delete", "true")
+      if ("hive.server2.authentication.kerberos.principal" in configurations["hive-site"]["properties"]) or \
+              ("hive-site" not in services["configurations"]) or \
+              ("hive-site" in services["configurations"] and "hive.server2.authentication.kerberos.principal" in services["configurations"]["hive-site"]["properties"]):
+        putHiveSitePropertyAttribute("hive.server2.authentication.kerberos.principal", "delete", "true")
+
+    if hive_server2_auth == "pam":
+      putHiveSiteProperty("hive.server2.authentication.pam.services", "")
+    else:
+      if ("hive.server2.authentication.pam.services" in configurations["hive-site"]["properties"]) or \
+              ("hive-site" not in services["configurations"]) or \
+              ("hive-site" in services["configurations"] and "hive.server2.authentication.pam.services" in services["configurations"]["hive-site"]["properties"]):
+        putHiveSitePropertyAttribute("hive.server2.authentication.pam.services", "delete", "true")
+
+    if hive_server2_auth == "custom":
+      putHiveSiteProperty("hive.server2.custom.authentication.class", "")
+    else:
+      if ("hive.server2.authentication" in configurations["hive-site"]["properties"]) or \
+              ("hive-site" not in services["configurations"]) or \
+              ("hive-site" in services["configurations"] and "hive.server2.custom.authentication.class" in services["configurations"]["hive-site"]["properties"]):
+        putHiveSitePropertyAttribute("hive.server2.custom.authentication.class", "delete", "true")
+
+    # HiveServer, Client, Metastore heapsize
+    hs_heapsize_multiplier = 3.0/8
+    hm_heapsize_multiplier = 1.0/8
+    # HiveServer2 and HiveMetastore located on the same host
+    hive_server_hosts = self.getHostsWithComponent("HIVE", "HIVE_SERVER", services, hosts)
+    hive_client_hosts = self.getHostsWithComponent("HIVE", "HIVE_CLIENT", services, hosts)
+
+    if hive_server_hosts is not None and len(hive_server_hosts):
+      hs_host_ram = hive_server_hosts[0]["Hosts"]["total_mem"]/1024
+      putHiveEnvProperty("hive.metastore.heapsize", max(512, int(hs_host_ram*hm_heapsize_multiplier)))
+      putHiveEnvProperty("hive.heapsize", max(512, int(hs_host_ram*hs_heapsize_multiplier)))
+      putHiveEnvPropertyAttributes("hive.metastore.heapsize", "maximum", max(1024, hs_host_ram))
+      putHiveEnvPropertyAttributes("hive.heapsize", "maximum", max(1024, hs_host_ram))
+
+    if hive_client_hosts is not None and len(hive_client_hosts):
+      putHiveEnvProperty("hive.client.heapsize", 1024)
+      putHiveEnvPropertyAttributes("hive.client.heapsize", "maximum", max(1024, int(hive_client_hosts[0]["Hosts"]["total_mem"]/1024)))
+
+
+  def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP22StackAdvisor, self).recommendHbaseConfigurations(configurations, clusterData, services, hosts)
+    putHbaseEnvPropertyAttributes = self.putPropertyAttribute(configurations, "hbase-env")
+
+    hmaster_host = self.getHostWithComponent("HBASE", "HBASE_MASTER", services, hosts)
+    if hmaster_host is not None:
+      host_ram = hmaster_host["Hosts"]["total_mem"]
+      putHbaseEnvPropertyAttributes('hbase_master_heapsize', 'maximum', max(1024, int(host_ram/1024)))
+
+    rs_hosts = self.getHostsWithComponent("HBASE", "HBASE_REGIONSERVER", services, hosts)
+    if rs_hosts is not None and len(rs_hosts) > 0:
+      min_ram = rs_hosts[0]["Hosts"]["total_mem"]
+      for host in rs_hosts:
+        host_ram = host["Hosts"]["total_mem"]
+        min_ram = min(min_ram, host_ram)
+
+      putHbaseEnvPropertyAttributes('hbase_regionserver_heapsize', 'maximum', max(1024, int(min_ram*0.8/1024)))
+
+    putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
+    putHbaseSitePropertyAttributes = self.putPropertyAttribute(configurations, "hbase-site")
+    putHbaseSiteProperty("hbase.regionserver.global.memstore.size", '0.4')
+
+    if 'hbase-env' in services['configurations'] and 'phoenix_sql_enabled' in services['configurations']['hbase-env']['properties'] and \
+       'true' == services['configurations']['hbase-env']['properties']['phoenix_sql_enabled'].lower():
+      putHbaseSiteProperty("hbase.regionserver.wal.codec", 'org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec')
+      putHbaseSiteProperty("phoenix.functions.allowUserDefinedFunctions", 'true')
+    else:
+      putHbaseSiteProperty("hbase.regionserver.wal.codec", 'org.apache.hadoop.hbase.regionserver.wal.WALCellCodec')
+      if ('hbase.rpc.controllerfactory.class' in configurations["hbase-site"]["properties"]) or \
+              ('hbase-site' in services['configurations'] and 'hbase.rpc.controllerfactory.class' in services['configurations']["hbase-site"]["properties"]):
+        putHbaseSitePropertyAttributes('hbase.rpc.controllerfactory.class', 'delete', 'true')
+      if ('phoenix.functions.allowUserDefinedFunctions' in configurations["hbase-site"]["properties"]) or \
+              ('hbase-site' in services['configurations'] and 'phoenix.functions.allowUserDefinedFunctions' in services['configurations']["hbase-site"]["properties"]):
+        putHbaseSitePropertyAttributes('phoenix.functions.allowUserDefinedFunctions', 'delete', 'true')
+
+    if "ranger-env" in services["configurations"] and "ranger-hbase-plugin-properties" in services["configurations"] and \
+        "ranger-hbase-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putHbaseRangerPluginProperty = self.putProperty(configurations, "ranger-hbase-plugin-properties", services)
+      rangerEnvHbasePluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-hbase-plugin-enabled"]
+      putHbaseRangerPluginProperty("ranger-hbase-plugin-enabled", rangerEnvHbasePluginProperty)
+      if "cluster-env" in services["configurations"] and "smokeuser" in services["configurations"]["cluster-env"]["properties"]:
+        smoke_user = services["configurations"]["cluster-env"]["properties"]["smokeuser"]
+        putHbaseRangerPluginProperty("policy_user", smoke_user)
+    rangerPluginEnabled = ''
+    if 'ranger-hbase-plugin-properties' in configurations and 'ranger-hbase-plugin-enabled' in  configurations['ranger-hbase-plugin-properties']['properties']:
+      rangerPluginEnabled = configurations['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled']
+    elif 'ranger-hbase-plugin-properties' in services['configurations'] and 'ranger-hbase-plugin-enabled' in services['configurations']['ranger-hbase-plugin-properties']['properties']:
+      rangerPluginEnabled = services['configurations']['ranger-hbase-plugin-properties']['properties']['ranger-hbase-plugin-enabled']
+
+    if rangerPluginEnabled and rangerPluginEnabled.lower() == 'Yes'.lower():
+      putHbaseSiteProperty('hbase.security.authorization','true')
+
+    # Recommend configs for bucket cache
+    threshold = 23 # 2 Gb is reserved for other offheap memory
+    mb = 1024
+    if (int(clusterData["hbaseRam"]) > threshold):
+      # To enable cache - calculate values
+      regionserver_total_ram = int(clusterData["hbaseRam"]) * mb
+      regionserver_heap_size = 20480
+      regionserver_max_direct_memory_size = regionserver_total_ram - regionserver_heap_size
+      hfile_block_cache_size = '0.4'
+      block_cache_heap = 8192 # int(regionserver_heap_size * hfile_block_cache_size)
+      hbase_regionserver_global_memstore_size = '0.4'
+      reserved_offheap_memory = 2048
+      bucketcache_offheap_memory = regionserver_max_direct_memory_size - reserved_offheap_memory
+      hbase_bucketcache_size = block_cache_heap + bucketcache_offheap_memory
+      hbase_bucketcache_percentage_in_combinedcache = float(bucketcache_offheap_memory) / hbase_bucketcache_size
+      hbase_bucketcache_percentage_in_combinedcache_str = "{0:.4f}".format(math.ceil(hbase_bucketcache_percentage_in_combinedcache * 10000) / 10000.0)
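+      # Worked example (hypothetical 32 GB hbaseRam): direct memory = 32768 - 20480 = 12288 MB,
+      # bucket cache = 12288 - 2048 = 10240 MB, combined-cache share = 10240 / (8192 + 10240) ~= 0.5556.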
+
+      # Set values in hbase-site
+      putHbaseSiteProperty('hfile.block.cache.size', hfile_block_cache_size)
+      putHbaseSiteProperty('hbase.regionserver.global.memstore.size', hbase_regionserver_global_memstore_size)
+      putHbaseSiteProperty('hbase.bucketcache.ioengine', 'offheap')
+      putHbaseSiteProperty('hbase.bucketcache.size', hbase_bucketcache_size)
+      putHbaseSiteProperty('hbase.bucketcache.percentage.in.combinedcache', hbase_bucketcache_percentage_in_combinedcache_str)
+
+      # Enable in hbase-env
+      putHbaseEnvProperty = self.putProperty(configurations, "hbase-env", services)
+      putHbaseEnvProperty('hbase_max_direct_memory_size', regionserver_max_direct_memory_size)
+      putHbaseEnvProperty('hbase_regionserver_heapsize', regionserver_heap_size)
+    else:
+      # Disable
+      if ('hbase.bucketcache.ioengine' in configurations["hbase-site"]["properties"]) or \
+              ('hbase-site' in services['configurations'] and 'hbase.bucketcache.ioengine' in services['configurations']["hbase-site"]["properties"]):
+        putHbaseSitePropertyAttributes('hbase.bucketcache.ioengine', 'delete', 'true')
+      if ('hbase.bucketcache.size' in configurations["hbase-site"]["properties"]) or \
+              ('hbase-site' in services['configurations'] and 'hbase.bucketcache.size' in services['configurations']["hbase-site"]["properties"]):
+        putHbaseSitePropertyAttributes('hbase.bucketcache.size', 'delete', 'true')
+      if ('hbase.bucketcache.percentage.in.combinedcache' in configurations["hbase-site"]["properties"]) or \
+              ('hbase-site' in services['configurations'] and 'hbase.bucketcache.percentage.in.combinedcache' in services['configurations']["hbase-site"]["properties"]):
+        putHbaseSitePropertyAttributes('hbase.bucketcache.percentage.in.combinedcache', 'delete', 'true')
+      if ('hbase_max_direct_memory_size' in configurations["hbase-env"]["properties"]) or \
+              ('hbase-env' in services['configurations'] and 'hbase_max_direct_memory_size' in services['configurations']["hbase-env"]["properties"]):
+        putHbaseEnvPropertyAttributes('hbase_max_direct_memory_size', 'delete', 'true')
+
+    # Authorization
+    hbaseCoProcessorConfigs = {
+      'hbase.coprocessor.region.classes': [],
+      'hbase.coprocessor.regionserver.classes': [],
+      'hbase.coprocessor.master.classes': []
+    }
+    for key in hbaseCoProcessorConfigs:
+      hbase_coprocessor_classes = None
+      if key in configurations["hbase-site"]["properties"]:
+        hbase_coprocessor_classes = configurations["hbase-site"]["properties"][key].strip()
+      elif 'hbase-site' in services['configurations'] and key in services['configurations']["hbase-site"]["properties"]:
+        hbase_coprocessor_classes = services['configurations']["hbase-site"]["properties"][key].strip()
+      if hbase_coprocessor_classes:
+        hbaseCoProcessorConfigs[key] = hbase_coprocessor_classes.split(',')
+
+    # The freshly calculated value in 'configurations' takes priority; otherwise fall back to the service's configurations.
+    hbase_security_authorization = None
+    if 'hbase-site' in configurations and 'hbase.security.authorization' in configurations['hbase-site']['properties']:
+      hbase_security_authorization = configurations['hbase-site']['properties']['hbase.security.authorization']
+    elif 'hbase-site' in services['configurations'] and 'hbase.security.authorization' in services['configurations']['hbase-site']['properties']:
+      hbase_security_authorization = services['configurations']['hbase-site']['properties']['hbase.security.authorization']
+    if hbase_security_authorization:
+      if 'true' == hbase_security_authorization.lower():
+        hbaseCoProcessorConfigs['hbase.coprocessor.master.classes'].append('org.apache.hadoop.hbase.security.access.AccessController')
+        hbaseCoProcessorConfigs['hbase.coprocessor.regionserver.classes'].append('org.apache.hadoop.hbase.security.access.AccessController')
+        # regional classes when hbase authorization is enabled
+        authRegionClasses = ['org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint', 'org.apache.hadoop.hbase.security.access.AccessController']
+        hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].extend(authRegionClasses)
+      else:
+        if 'org.apache.hadoop.hbase.security.access.AccessController' in hbaseCoProcessorConfigs['hbase.coprocessor.region.classes']:
+          hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].remove('org.apache.hadoop.hbase.security.access.AccessController')
+        if 'org.apache.hadoop.hbase.security.access.AccessController' in hbaseCoProcessorConfigs['hbase.coprocessor.master.classes']:
+          hbaseCoProcessorConfigs['hbase.coprocessor.master.classes'].remove('org.apache.hadoop.hbase.security.access.AccessController')
+
+        hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].append("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint")
+        if ('hbase.coprocessor.regionserver.classes' in configurations["hbase-site"]["properties"]) or \
+                ('hbase-site' in services['configurations'] and 'hbase.coprocessor.regionserver.classes' in services['configurations']["hbase-site"]["properties"]):
+          putHbaseSitePropertyAttributes('hbase.coprocessor.regionserver.classes', 'delete', 'true')
+    else:
+      hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].append("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint")
+      if ('hbase.coprocessor.regionserver.classes' in configurations["hbase-site"]["properties"]) or \
+              ('hbase-site' in services['configurations'] and 'hbase.coprocessor.regionserver.classes' in services['configurations']["hbase-site"]["properties"]):
+        putHbaseSitePropertyAttributes('hbase.coprocessor.regionserver.classes', 'delete', 'true')
+
+    # Authentication
+    if 'hbase-site' in services['configurations'] and 'hbase.security.authentication' in services['configurations']['hbase-site']['properties']:
+      if 'kerberos' == services['configurations']['hbase-site']['properties']['hbase.security.authentication'].lower():
+        if 'org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint' not in hbaseCoProcessorConfigs['hbase.coprocessor.region.classes']:
+          hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].append('org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint')
+        if 'org.apache.hadoop.hbase.security.token.TokenProvider' not in hbaseCoProcessorConfigs['hbase.coprocessor.region.classes']:
+          hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].append('org.apache.hadoop.hbase.security.token.TokenProvider')
+      else:
+        if 'org.apache.hadoop.hbase.security.token.TokenProvider' in hbaseCoProcessorConfigs['hbase.coprocessor.region.classes']:
+          hbaseCoProcessorConfigs['hbase.coprocessor.region.classes'].remove('org.apache.hadoop.hbase.security.token.TokenProvider')
+
+    # Remove duplicates while preserving order, and drop unresolved template placeholders
+    for key in hbaseCoProcessorConfigs:
+      uniqueCoprocessorRegionClassList = []
+      for coprocessorClass in hbaseCoProcessorConfigs[key]:
+        if coprocessorClass not in uniqueCoprocessorRegionClassList \
+           and coprocessorClass.strip() not in ['{{hbase_coprocessor_region_classes}}', '{{hbase_coprocessor_master_classes}}', '{{hbase_coprocessor_regionserver_classes}}']:
+          uniqueCoprocessorRegionClassList.append(coprocessorClass)
+      putHbaseSiteProperty(key, ','.join(uniqueCoprocessorRegionClassList))
+
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    rangerServiceVersion=''
+    if 'RANGER' in servicesList:
+      rangerServiceVersion = [service['StackServices']['service_version'] for service in services["services"] if service['StackServices']['service_name'] == 'RANGER'][0]
+
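+    # Ranger 0.4.0 predates the Apache package rename, so its coprocessor still lives under com.xasecure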
+    if rangerServiceVersion and rangerServiceVersion == '0.4.0':
+      rangerClass = 'com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor'
+    else:
+      rangerClass = 'org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor'
+
+    nonRangerClass = 'org.apache.hadoop.hbase.security.access.AccessController'
+    hbaseClassConfigs = hbaseCoProcessorConfigs.keys()
+
+    for key in hbaseClassConfigs:
+      if 'hbase-site' in services['configurations']:
+        if key in services['configurations']['hbase-site']['properties']:
+          if 'hbase-site' in configurations and key in configurations['hbase-site']['properties']:
+            coprocessorConfig = configurations['hbase-site']['properties'][key]
+          else:
+            coprocessorConfig = services['configurations']['hbase-site']['properties'][key]
+          coprocessorClasses = coprocessorConfig.split(",")
+          coprocessorClasses = filter(None, coprocessorClasses) # Removes empty string elements from array
+          if rangerPluginEnabled and rangerPluginEnabled.lower() == 'Yes'.lower():
+            if nonRangerClass in coprocessorClasses:
+              coprocessorClasses.remove(nonRangerClass)
+            if not rangerClass in coprocessorClasses:
+              coprocessorClasses.append(rangerClass)
+            putHbaseSiteProperty(key, ','.join(coprocessorClasses))
+          elif rangerPluginEnabled and rangerPluginEnabled.lower() == 'No'.lower():
+            if rangerClass in coprocessorClasses:
+              coprocessorClasses.remove(rangerClass)
+              if not nonRangerClass in coprocessorClasses:
+                coprocessorClasses.append(nonRangerClass)
+              putHbaseSiteProperty(key, ','.join(coprocessorClasses))
+        elif rangerPluginEnabled and rangerPluginEnabled.lower() == 'Yes'.lower():
+          putHbaseSiteProperty(key, rangerClass)
+
+
+  def recommendTezConfigurations(self, configurations, clusterData, services, hosts):
+    if not "yarn-site" in configurations:
+      self.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    # the properties below should always be present, as they are provided by the HDP 2.0.6 stack advisor
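+    # yarnMaxAllocationSize caps containers at 30x the minimum allocation or at the configured scheduler maximum, whichever is smaller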
+    yarnMaxAllocationSize = min(30 * int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+
+    putTezProperty = self.putProperty(configurations, "tez-site", services)
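+    # AM memory: values under 3072 MB are doubled, then capped at the YARN maximum allocation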
+    putTezProperty("tez.am.resource.memory.mb", min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]), int(clusterData['amMemory']) * 2 if int(clusterData['amMemory']) < 3072 else int(clusterData['amMemory'])))
+
+    taskResourceMemory = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
+    taskResourceMemory = min(clusterData['containers'] * clusterData['ramPerContainer'], taskResourceMemory, yarnMaxAllocationSize)
+    putTezProperty("tez.task.resource.memory.mb", min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]), taskResourceMemory))
+    taskResourceMemory = int(configurations["tez-site"]["properties"]["tez.task.resource.memory.mb"])
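+    # Sort buffer gets 40% of task memory, capped at the 2047 MB sort-buffer limit;
+    # the unordered output buffer gets 7.5%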
+    putTezProperty("tez.runtime.io.sort.mb", min(int(taskResourceMemory * 0.4), 2047))
+    putTezProperty("tez.runtime.unordered.output.buffer.size-mb", int(taskResourceMemory * 0.075))
+    putTezProperty("tez.session.am.dag.submit.timeout.secs", "600")
+
+    tez_queue = self.recommendYarnQueue(services, "tez-site", "tez.queue.name")
+    if tez_queue is not None:
+      putTezProperty("tez.queue.name", tez_queue)
+
+    serverProperties = services["ambari-server-properties"]
+    latest_tez_jar_version = None
+
+    server_host = socket.getfqdn()
+    for host in hosts["items"]:
+      if server_host == host["Hosts"]["host_name"]:
+        server_host = host["Hosts"]["public_host_name"]
+    server_port = '8080'
+    server_protocol = 'http'
+    views_dir = '/var/lib/ambari-server/resources/views/'
+
+    if serverProperties:
+      if 'client.api.port' in serverProperties:
+        server_port = serverProperties['client.api.port']
+      if 'views.dir' in serverProperties:
+        views_dir = serverProperties['views.dir']
+      if 'api.ssl' in serverProperties:
+        if serverProperties['api.ssl'].lower() == 'true':
+          server_protocol = 'https'
+
+      views_work_dir = os.path.join(views_dir, 'work')
+
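+      # Scan the Ambari views work dir for deployed TEZ view instances and keep the highest version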
+      if os.path.exists(views_work_dir) and os.path.isdir(views_work_dir):
+        last_version = '0.0.0'
+        for file in os.listdir(views_work_dir):
+          if fnmatch.fnmatch(file, 'TEZ{*}'):
+            current_version = file.lstrip("TEZ{").rstrip("}") # E.g.: TEZ{0.7.0.2.3.0.0-2154}
+            if self.versionCompare(current_version.replace("-", "."), last_version.replace("-", ".")) >= 0:
+              latest_tez_jar_version = current_version
+              last_version = current_version
+            pass
+        pass
+      pass
+    pass
+
+    if latest_tez_jar_version:
+      tez_url = '{0}://{1}:{2}/#/main/views/TEZ/{3}/TEZ_CLUSTER_INSTANCE'.format(server_protocol, server_host, server_port, latest_tez_jar_version)
+      putTezProperty("tez.tez-ui.history-url.base", tez_url)
+    pass
+
+  def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP22StackAdvisor, self).recommendStormConfigurations(configurations, clusterData, services, hosts)
+    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+    putStormSiteAttributes = self.putPropertyAttribute(configurations, "storm-site")
+    storm_site = getServicesSiteProperties(services, "storm-site")
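+    # Heuristic: storm.zookeeper.superACL appears in storm-site only on kerberized clusters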
+    security_enabled = (storm_site is not None and "storm.zookeeper.superACL" in storm_site)
+    if "ranger-env" in services["configurations"] and "ranger-storm-plugin-properties" in services["configurations"] and \
+        "ranger-storm-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putStormRangerPluginProperty = self.putProperty(configurations, "ranger-storm-plugin-properties", services)
+      rangerEnvStormPluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-storm-plugin-enabled"]
+      putStormRangerPluginProperty("ranger-storm-plugin-enabled", rangerEnvStormPluginProperty)
+
+    rangerPluginEnabled = ''
+    if 'ranger-storm-plugin-properties' in configurations and 'ranger-storm-plugin-enabled' in  configurations['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = configurations['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+    elif 'ranger-storm-plugin-properties' in services['configurations'] and 'ranger-storm-plugin-enabled' in services['configurations']['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = services['configurations']['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+
+    nonRangerClass = 'backtype.storm.security.auth.authorizer.SimpleACLAuthorizer'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    rangerServiceVersion=''
+    if 'RANGER' in servicesList:
+      rangerServiceVersion = [service['StackServices']['service_version'] for service in services["services"] if service['StackServices']['service_name'] == 'RANGER'][0]
+
+    if rangerServiceVersion and rangerServiceVersion == '0.4.0':
+      rangerClass = 'com.xasecure.authorization.storm.authorizer.XaSecureStormAuthorizer'
+    else:
+      rangerClass = 'org.apache.ranger.authorization.storm.authorizer.RangerStormAuthorizer'
+    # Cluster is kerberized
+    if security_enabled:
+      if rangerPluginEnabled and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+        putStormSiteProperty('nimbus.authorizer',rangerClass)
+      elif (services["configurations"]["storm-site"]["properties"]["nimbus.authorizer"] == rangerClass):
+        putStormSiteProperty('nimbus.authorizer', nonRangerClass)
+    else:
+      putStormSiteAttributes('nimbus.authorizer', 'delete', 'true')
+
+  def recommendKnoxConfigurations(self, configurations, clusterData, services, hosts):
+    if "ranger-env" in services["configurations"] and "ranger-knox-plugin-properties" in services["configurations"] and \
+        "ranger-knox-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putKnoxRangerPluginProperty = self.putProperty(configurations, "ranger-knox-plugin-properties", services)
+      rangerEnvKnoxPluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-knox-plugin-enabled"]
+      putKnoxRangerPluginProperty("ranger-knox-plugin-enabled", rangerEnvKnoxPluginProperty)
+
+    if 'topology' in services["configurations"] and 'content' in services["configurations"]["topology"]["properties"]:
+      putKnoxTopologyContent = self.putProperty(configurations, "topology", services)
+      rangerPluginEnabled = ''
+      if 'ranger-knox-plugin-properties' in configurations and 'ranger-knox-plugin-enabled' in  configurations['ranger-knox-plugin-properties']['properties']:
+        rangerPluginEnabled = configurations['ranger-knox-plugin-properties']['properties']['ranger-knox-plugin-enabled']
+      elif 'ranger-knox-plugin-properties' in services['configurations'] and 'ranger-knox-plugin-enabled' in services['configurations']['ranger-knox-plugin-properties']['properties']:
+        rangerPluginEnabled = services['configurations']['ranger-knox-plugin-properties']['properties']['ranger-knox-plugin-enabled']
+
+      # check if authorization provider already added
+      topologyContent = services["configurations"]["topology"]["properties"]["content"]
+      authorizationProviderExists = False
+      root = ET.fromstring(topologyContent)
+      if root is not None:
+        gateway = root.find("gateway")
+        if gateway is not None:
+          for provider in gateway.findall('provider'):
+            role = provider.find('role')
+            if role is not None and role.text and role.text.lower() == "authorization":
+              authorizationProviderExists = True
+
+            # Rename the authorization provider to match the Ranger plugin state
+            name = provider.find('name')
+            newAuthName = None
+            if name is not None and name.text == "AclsAuthz" and rangerPluginEnabled \
+               and rangerPluginEnabled.lower() == "Yes".lower():
+              newAuthName = "XASecurePDPKnox"
+            elif name is not None and name.text == 'XASecurePDPKnox' \
+               and ((not rangerPluginEnabled) or rangerPluginEnabled.lower() != "Yes".lower()):
+              newAuthName = "AclsAuthz"
+
+            if newAuthName is not None:
+              name.text = newAuthName
+              putKnoxTopologyContent('content', ET.tostring(root))
+
+            if authorizationProviderExists:
+              break
+
+      if not authorizationProviderExists:
+        if root is not None:
+          gateway = root.find("gateway")
+          if gateway is not None:
+            provider = ET.SubElement(gateway, 'provider')
+
+            role = ET.SubElement(provider, 'role')
+            role.text = "authorization"
+
+            name = ET.SubElement(provider, 'name')
+            if rangerPluginEnabled and rangerPluginEnabled.lower() == "Yes".lower():
+              name.text = "XASecurePDPKnox"
+            else:
+              name.text = "AclsAuthz"
+
+            enabled = ET.SubElement(provider, 'enabled')
+            enabled.text = "true"
+
+            #TODO add pretty format for newly added provider
+            putKnoxTopologyContent('content', ET.tostring(root))
+
+
+
+  def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP22StackAdvisor, self).recommendRangerConfigurations(configurations, clusterData, services, hosts)
+    putRangerEnvProperty = self.putProperty(configurations, "ranger-env")
+    cluster_env = getServicesSiteProperties(services, "cluster-env")
+    security_enabled = cluster_env is not None and "security_enabled" in cluster_env and \
+                       cluster_env["security_enabled"].lower() == "true"
+    if "ranger-env" in configurations and not security_enabled:
+      putRangerEnvProperty("ranger-storm-plugin-enabled", "No")
+
+  def getServiceConfigurationValidators(self):
+    parentValidators = super(HDP22StackAdvisor, self).getServiceConfigurationValidators()
+    childValidators = {
+      "HDFS": {"hdfs-site": self.validateHDFSConfigurations,
+               "hadoop-env": self.validateHDFSConfigurationsEnv,
+               "ranger-hdfs-plugin-properties": self.validateHDFSRangerPluginConfigurations},
+      "YARN": {"yarn-env": self.validateYARNEnvConfigurations,
+               "ranger-yarn-plugin-properties": self.validateYARNRangerPluginConfigurations},
+      "HIVE": {"hiveserver2-site": self.validateHiveServer2Configurations,
+               "hive-site": self.validateHiveConfigurations,
+               "hive-env": self.validateHiveConfigurationsEnv,
+               "webhcat-site": self.validateWebhcatConfigurations},
+      "HBASE": {"hbase-site": self.validateHBASEConfigurations,
+                "hbase-env": self.validateHBASEEnvConfigurations,
+                "ranger-hbase-plugin-properties": self.validateHBASERangerPluginConfigurations},
+      "KNOX": {"ranger-knox-plugin-properties": self.validateKnoxRangerPluginConfigurations},
+      "KAFKA": {"ranger-kafka-plugin-properties": self.validateKafkaRangerPluginConfigurations},
+      "STORM": {"ranger-storm-plugin-properties": self.validateStormRangerPluginConfigurations},
+      "MAPREDUCE2": {"mapred-site": self.validateMapReduce2Configurations},
+      "TEZ": {"tez-site": self.validateTezConfigurations},
+      "RANGER": {"ranger-env": self.validateRangerConfigurationsEnv},
+      "SPARK": {"spark-defaults": self.validateSparkDefaults,
+                "spark-thrift-sparkconf": self.validateSparkThriftSparkConf}
+    }
+    self.mergeValidators(parentValidators, childValidators)
+    return parentValidators
+
+  def recommendLogsearchConfigurations(self, configurations, clusterData, services, hosts):
+    putLogsearchProperty = self.putProperty(configurations, "logsearch-properties", services)
+    infraSolrHosts = self.getComponentHostNames(services, "AMBARI_INFRA", "INFRA_SOLR")
+
+    if infraSolrHosts is not None and len(infraSolrHosts) > 0 \
+      and "logsearch-properties" in services["configurations"]:
+      recommendedMinShards = len(infraSolrHosts)
+      recommendedShards = 2 * len(infraSolrHosts)
+      recommendedMaxShards = 3 * len(infraSolrHosts)
+      # recommend number of shards: 2 per Infra Solr host, bounded between 1x and 3x the host count
+      putLogsearchAttribute = self.putPropertyAttribute(configurations, "logsearch-properties")
+      putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'minimum', recommendedMinShards)
+      putLogsearchAttribute('logsearch.collection.service.logs.numshards', 'maximum', recommendedMaxShards)
+      putLogsearchProperty("logsearch.collection.service.logs.numshards", recommendedShards)
+
+      putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'minimum', recommendedMinShards)
+      putLogsearchAttribute('logsearch.collection.audit.logs.numshards', 'maximum', recommendedMaxShards)
+      putLogsearchProperty("logsearch.collection.audit.logs.numshards", recommendedShards)
+      # recommend replication factor
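+      # replication factor = 1 + floor(log base 5 of the Infra Solr host count), so it grows slowly with cluster size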
+      replicationRecommendFloat = math.log(len(infraSolrHosts), 5)
+      recommendedReplicationFactor = int(1 + math.floor(replicationRecommendFloat))
+      putLogsearchProperty("logsearch.collection.service.logs.replication.factor", recommendedReplicationFactor)
+      putLogsearchProperty("logsearch.collection.audit.logs.replication.factor", recommendedReplicationFactor)
+
+  def validateTezConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [ {"config-name": 'tez.am.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.am.resource.memory.mb')},
+                        {"config-name": 'tez.task.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.task.resource.memory.mb')},
+                        {"config-name": 'tez.runtime.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.runtime.io.sort.mb')},
+                        {"config-name": 'tez.runtime.unordered.output.buffer.size-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.runtime.unordered.output.buffer.size-mb')},
+                        {"config-name": 'tez.queue.name', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'tez.queue.name', services)} ]
+    if "tez.tez-ui.history-url.base" in recommendedDefaults:
+      validationItems.append({"config-name": 'tez.tez-ui.history-url.base', "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults, 'tez.tez-ui.history-url.base')})
+
+    tez_site = properties
+    prop_name1 = 'tez.am.resource.memory.mb'
+    prop_name2 = 'tez.task.resource.memory.mb'
+    yarnSiteProperties = getSiteProperties(configurations, "yarn-site")
+    if yarnSiteProperties:
+      yarnMaxAllocationSize = min(30 * int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+      if int(tez_site[prop_name1]) > yarnMaxAllocationSize:
+          validationItems.append({"config-name": prop_name1,
+                                  "item": self.getWarnItem(
+                                      "{0} should be less than YARN max allocation size ({1})".format(prop_name1, yarnMaxAllocationSize))})
+      if int(tez_site[prop_name2]) > yarnMaxAllocationSize:
+          validationItems.append({"config-name": prop_name2,
+                                  "item": self.getWarnItem(
+                                      "{0} should be less than YARN max allocation size ({1})".format(prop_name2, yarnMaxAllocationSize))})
+
+    return self.toConfigurationValidationProblems(validationItems, "tez-site")
+
+  def recommendMapReduce2Configurations(self, configurations, clusterData, services, hosts):
+    self.recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    putMapredProperty = self.putProperty(configurations, "mapred-site", services)
+    nodemanagerMinRam = 1048576 # 1TB in mb
+    if "referenceNodeManagerHost" in clusterData:
+      nodemanagerMinRam = min(clusterData["referenceNodeManagerHost"]["total_mem"]/1024, nodemanagerMinRam)
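+    # Xmx values below are 80% of the container size, leaving headroom for non-heap JVM memory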
+    putMapredProperty('yarn.app.mapreduce.am.resource.mb', configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])
+    putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(0.8 * int(configurations["mapred-site"]["properties"]["yarn.app.mapreduce.am.resource.mb"]))) + "m" + " -Dhdp.version=${hdp.version}")
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    min_mapreduce_map_memory_mb = 0
+    min_mapreduce_reduce_memory_mb = 0
+    min_mapreduce_map_java_opts = 0
+    if ("PIG" in servicesList) and clusterData["totalAvailableRam"] >= 4096:
+      min_mapreduce_map_memory_mb = 1536
+      min_mapreduce_reduce_memory_mb = 1536
+      min_mapreduce_map_java_opts = 1024
+    putMapredProperty('mapreduce.map.memory.mb', min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]), max(min_mapreduce_map_memory_mb, int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]))))
+    putMapredProperty('mapreduce.reduce.memory.mb', min(int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]), max(min_mapreduce_reduce_memory_mb, min(2*int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(nodemanagerMinRam)))))
+    mapredMapXmx = int(0.8*int(configurations["mapred-site"]["properties"]["mapreduce.map.memory.mb"]))
+    putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(max(min_mapreduce_map_java_opts, mapredMapXmx)) + "m")
+    putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(0.8*int(configurations["mapred-site"]["properties"]["mapreduce.reduce.memory.mb"]))) + "m")
+    putMapredProperty('mapreduce.task.io.sort.mb', str(min(int(0.7*mapredMapXmx), 2047)))
+    # Property Attributes
+    putMapredPropertyAttribute = self.putPropertyAttribute(configurations, "mapred-site")
+    yarnMinAllocationSize = int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"])
+    yarnMaxAllocationSize = min(30 * int(configurations["yarn-site"]["properties"]["yarn.scheduler.minimum-allocation-mb"]), int(configurations["yarn-site"]["properties"]["yarn.scheduler.maximum-allocation-mb"]))
+    putMapredPropertyAttribute("mapreduce.map.memory.mb", "maximum", yarnMaxAllocationSize)
+    putMapredPropertyAttribute("mapreduce.map.memory.mb", "minimum", yarnMinAllocationSize)
+    putMapredPropertyAttribute("mapreduce.reduce.memory.mb", "maximum", yarnMaxAllocationSize)
+    putMapredPropertyAttribute("mapreduce.reduce.memory.mb", "minimum", yarnMinAllocationSize)
+    putMapredPropertyAttribute("yarn.app.mapreduce.am.resource.mb", "maximum", yarnMaxAllocationSize)
+    putMapredPropertyAttribute("yarn.app.mapreduce.am.resource.mb", "minimum", yarnMinAllocationSize)
+    # Hadoop MR limitation
+    putMapredPropertyAttribute("mapreduce.task.io.sort.mb", "maximum", "2047")
+
+    mr_queue = self.recommendYarnQueue(services, "mapred-site", "mapreduce.job.queuename")
+    if mr_queue is not None:
+      putMapredProperty("mapreduce.job.queuename", mr_queue)
+
+  def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
+                        {"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
+                        {"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
+                        {"config-name": 'mapreduce.map.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},
+                        {"config-name": 'mapreduce.reduce.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},
+                        {"config-name": 'yarn.app.mapreduce.am.resource.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},
+                        {"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')},
+                        {"config-name": 'mapreduce.job.queuename', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'mapreduce.job.queuename', services)} ]
+
+    if 'mapreduce.map.java.opts' in properties and \
+      checkXmxValueFormat(properties['mapreduce.map.java.opts']):
+      mapreduceMapJavaOpts = formatXmxSizeToBytes(getXmxSize(properties['mapreduce.map.java.opts'])) / (1024.0 * 1024)
+      mapreduceMapMemoryMb = to_number(properties['mapreduce.map.memory.mb'])
+      if mapreduceMapJavaOpts > mapreduceMapMemoryMb:
+        validationItems.append({"config-name": 'mapreduce.map.java.opts', "item": self.getWarnItem("mapreduce.map.java.opts Xmx should be less than mapreduce.map.memory.mb ({0})".format(mapreduceMapMemoryMb))})
+
+    if 'mapreduce.reduce.java.opts' in properties and \
+      checkXmxValueFormat(properties['mapreduce.reduce.java.opts']):
+      mapreduceReduceJavaOpts = formatXmxSizeToBytes(getXmxSize(properties['mapreduce.reduce.java.opts'])) / (1024.0 * 1024)
+      mapreduceReduceMemoryMb = to_number(properties['mapreduce.reduce.memory.mb'])
+      if mapreduceReduceJavaOpts > mapreduceReduceMemoryMb:
+        validationItems.append({"config-name": 'mapreduce.reduce.java.opts', "item": self.getWarnItem("mapreduce.reduce.java.opts Xmx should be less than mapreduce.reduce.memory.mb ({0})".format(mapreduceReduceMemoryMb))})
+
+    if 'yarn.app.mapreduce.am.command-opts' in properties and \
+      checkXmxValueFormat(properties['yarn.app.mapreduce.am.command-opts']):
+      yarnAppMapreduceAmCommandOpts = formatXmxSizeToBytes(getXmxSize(properties['yarn.app.mapreduce.am.command-opts'])) / (1024.0 * 1024)
+      yarnAppMapreduceAmResourceMb = to_number(properties['yarn.app.mapreduce.am.resource.mb'])
+      if yarnAppMapreduceAmCommandOpts > yarnAppMapreduceAmResourceMb:
+        validationItems.append({"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.getWarnItem("yarn.app.mapreduce.am.command-opts Xmx should be less than yarn.app.mapreduce.am.resource.mb ({0})".format(yarnAppMapreduceAmResourceMb))})
+
+    return self.toConfigurationValidationProblems(validationItems, "mapred-site")
+
+  def validateHDFSConfigurationsEnv(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [ {"config-name": 'namenode_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_heapsize')},
+                        {"config-name": 'namenode_opt_newsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_newsize')},
+                        {"config-name": 'namenode_opt_maxnewsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_maxnewsize')}]
+    return self.toConfigurationValidationProblems(validationItems, "hadoop-env")
+
+  def validateHDFSRangerPluginConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hdfs-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hdfs-plugin-enabled'] if ranger_plugin_properties else 'No'
+    if (ranger_plugin_enabled.lower() == 'yes'):
+      # ranger-hdfs-plugin must be enabled in ranger-env
+      ranger_env = getServicesSiteProperties(services, 'ranger-env')
+      if not ranger_env or not 'ranger-hdfs-plugin-enabled' in ranger_env or \
+          ranger_env['ranger-hdfs-plugin-enabled'].lower() != 'yes':
+        validationItems.append({"config-name": 'ranger-hdfs-plugin-enabled',
+                                "item": self.getWarnItem(
+                                  "ranger-hdfs-plugin-properties/ranger-hdfs-plugin-enabled must correspond ranger-env/ranger-hdfs-plugin-enabled")})
+    return self.toConfigurationValidationProblems(validationItems, "ranger-hdfs-plugin-properties")
+
+
+  def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    parentValidationProblems = super(HDP22StackAdvisor, self).validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, hosts)
+    # We cannot access the hadoop.security.authentication property from the
+    # other config (core-site); that's why another heuristic is used here
+    hdfs_site = properties
+    core_site = getSiteProperties(configurations, "core-site")
+
+    dfs_encrypt_data_transfer = 'dfs.encrypt.data.transfer'  # Hadoop Wire encryption
+    try:
+      wire_encryption_enabled = hdfs_site[dfs_encrypt_data_transfer] == "true"
+    except KeyError:
+      wire_encryption_enabled = False
+
+    HTTP_ONLY = 'HTTP_ONLY'
+    HTTPS_ONLY = 'HTTPS_ONLY'
+    HTTP_AND_HTTPS = 'HTTP_AND_HTTPS'
+
+    VALID_HTTP_POLICY_VALUES = [HTTP_ONLY, HTTPS_ONLY, HTTP_AND_HTTPS]
+    VALID_TRANSFER_PROTECTION_VALUES = ['authentication', 'integrity', 'privacy']
+
+    validationItems = []
+    address_properties = [
+      # "dfs.datanode.address",
+      # "dfs.datanode.http.address",
+      # "dfs.datanode.https.address",
+      # "dfs.datanode.ipc.address",
+      # "dfs.journalnode.http-address",
+      # "dfs.journalnode.https-address",
+      # "dfs.namenode.rpc-address",
+      # "dfs.namenode.secondary.http-address",
+      "dfs.namenode.http-address",
+      "dfs.namenode.https-address",
+    ]
+    #Validating *address properties for correct values
+
+    for address_property in address_properties:
+      if address_property in hdfs_site:
+        value = hdfs_site[address_property]
+        if not is_valid_host_port_authority(value):
+          validationItems.append({"config-name" : address_property, "item" :
+            self.getErrorItem(address_property + " does not contain a valid host:port authority: " + value)})
+
+    #Adding Ranger Plugin logic here
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hdfs-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hdfs-plugin-enabled'] if ranger_plugin_properties else 'No'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+      if 'dfs.permissions.enabled' in hdfs_site and \
+        hdfs_site['dfs.permissions.enabled'] != 'true':
+        validationItems.append({"config-name": 'dfs.permissions.enabled',
+                                    "item": self.getWarnItem(
+                                      "dfs.permissions.enabled needs to be set to true if Ranger HDFS Plugin is enabled.")})
+
+    if (not wire_encryption_enabled and   # If wire encryption is enabled at Hadoop, it disables all our checks
+          'hadoop.security.authentication' in core_site and
+          core_site['hadoop.security.authentication'] == 'kerberos' and
+          'hadoop.security.authorization' in core_site and
+          core_site['hadoop.security.authorization'] == 'true'):
+      # security is enabled
+
+      dfs_http_policy = 'dfs.http.policy'
+      dfs_datanode_address = 'dfs.datanode.address'
+      datanode_http_address = 'dfs.datanode.http.address'
+      datanode_https_address = 'dfs.datanode.https.address'
+      data_transfer_protection = 'dfs.data.transfer.protection'
+
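+      # Privileged (< 1024) ports count as secure; absent properties fall back to the defaults below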
+      try: # Params may be absent
+        privileged_dfs_dn_port = isSecurePort(getPort(hdfs_site[dfs_datanode_address]))
+      except KeyError:
+        privileged_dfs_dn_port = False
+      try:
+        privileged_dfs_http_port = isSecurePort(getPort(hdfs_site[datanode_http_address]))
+      except KeyError:
+        privileged_dfs_http_port = False
+      try:
+        privileged_dfs_https_port = isSecurePort(getPort(hdfs_site[datanode_https_address]))
+      except KeyError:
+        privileged_dfs_https_port = False
+      try:
+        dfs_http_policy_value = hdfs_site[dfs_http_policy]
+      except KeyError:
+        dfs_http_policy_value = HTTP_ONLY  # Default
+      try:
+        data_transfer_protection_value = hdfs_site[data_transfer_protection]
+      except KeyError:
+        data_transfer_protection_value = None
+
+      if dfs_http_policy_value not in VALID_HTTP_POLICY_VALUES:
+        validationItems.append({"config-name": dfs_http_policy,
+                                "item": self.getWarnItem(
+                                  "Invalid property value: {0}. Valid values are {1}".format(
+                                    dfs_http_policy_value, VALID_HTTP_POLICY_VALUES))})
+
+      # determine whether we use secure ports
+      address_properties_with_warnings = []
+      if dfs_http_policy_value == HTTPS_ONLY:
+        if not privileged_dfs_dn_port and (privileged_dfs_https_port or datanode_https_address not in hdfs_site):
+          important_properties = [dfs_datanode_address, datanode_https_address]
+          message = "You set up datanode to use some non-secure ports. " \
+                    "If you want to run Datanode under non-root user in a secure cluster, " \
+                    "you should set all these properties {2} " \
+                    "to use non-secure ports (if property {3} does not exist, " \
+                    "just add it). You may also set up property {4} ('{5}' is a good default value). " \
+                    "Also, set up WebHDFS with SSL as " \
+                    "described in manual in order to be able to " \
+                    "use HTTPS.".format(dfs_http_policy, dfs_http_policy_value, important_properties,
+                                        datanode_https_address, data_transfer_protection,
+                                        VALID_TRANSFER_PROTECTION_VALUES[0])
+          address_properties_with_warnings.extend(important_properties)
+      else:  # dfs_http_policy_value == HTTP_AND_HTTPS or HTTP_ONLY
+        # We don't enforce datanode_https_address to use privileged ports here
+        any_nonprivileged_ports_are_in_use = not privileged_dfs_dn_port or not privileged_dfs_http_port
+        if any_nonprivileged_ports_are_in_use:
+          important_properties = [dfs_datanode_address, datanode_http_address]
+          message = "You have set up datanode to use some non-secure ports, but {0} is set to {1}. " \
+                    "In a secure cluster, Datanode forbids using non-secure ports " \
+                    "if {0} is not set to {3}. " \
+                    "Please make sure that properties {2} use secure ports.".format(
+                      dfs_http_policy, dfs_http_policy_value, important_properties, HTTPS_ONLY)
+          address_properties_with_warnings.extend(important_properties)
+
+      # Generate port-related warnings if any
+      for prop in address_properties_with_warnings:
+        validationItems.append({"config-name": prop,
+                                "item": self.getWarnItem(message)})
+
+      # Check if it is appropriate to use dfs.data.transfer.protection
+      if data_transfer_protection_value is not None:
+        if dfs_http_policy_value in [HTTP_ONLY, HTTP_AND_HTTPS]:
+          validationItems.append({"config-name": data_transfer_protection,
+                                  "item": self.getWarnItem(
+                                    "{0} property can not be used when {1} is set to any "
+                                    "value other then {2}. Tip: When {1} property is not defined, it defaults to {3}".format(
+                                    data_transfer_protection, dfs_http_policy, HTTPS_ONLY, HTTP_ONLY))})
+        elif not data_transfer_protection_value in VALID_TRANSFER_PROTECTION_VALUES:
+          validationItems.append({"config-name": data_transfer_protection,
+                                  "item": self.getWarnItem(
+                                    "Invalid property value: {0}. Valid values are {1}.".format(
+                                      d

<TRUNCATED>

[45/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/ams.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/ams.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/ams.py
new file mode 100755
index 0000000..e72ad82
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/ams.py
@@ -0,0 +1,388 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons.str_utils import compress_backslashes
+import glob
+import os
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def ams(name=None):
+  import params
+  if name == 'collector':
+    if not check_windows_service_exists(params.ams_collector_win_service_name):
+      Execute(format("cmd /C cd {ams_collector_home_dir} & ambari-metrics-collector.cmd setup"))
+
+    Directory(params.ams_collector_conf_dir,
+              owner=params.ams_user,
+              create_parents=True
+    )
+
+    Directory(params.ams_checkpoint_dir,
+              owner=params.ams_user,
+              create_parents=True
+    )
+
+    XmlConfig("ams-site.xml",
+              conf_dir=params.ams_collector_conf_dir,
+              configurations=params.config['configurations']['ams-site'],
+              configuration_attributes=params.config['configuration_attributes']['ams-site'],
+              owner=params.ams_user,
+    )
+
+    merged_ams_hbase_site = {}
+    merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
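+    # When security is on, ams-hbase-security-site entries override ams-hbase-site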
+    if params.security_enabled:
+      merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])
+
+    XmlConfig( "hbase-site.xml",
+               conf_dir = params.ams_collector_conf_dir,
+               configurations = merged_ams_hbase_site,
+               configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
+               owner = params.ams_user,
+    )
+
+    if params.log4j_props is not None:
+      File(os.path.join(params.ams_collector_conf_dir, "log4j.properties"),
+           owner=params.ams_user,
+           content=params.log4j_props
+      )
+
+    File(os.path.join(params.ams_collector_conf_dir, "ams-env.cmd"),
+         owner=params.ams_user,
+         content=InlineTemplate(params.ams_env_sh_template)
+    )
+
+    ServiceConfig(params.ams_collector_win_service_name,
+                  action="change_user",
+                  username = params.ams_user,
+                  password = Script.get_password(params.ams_user))
+
+    if not params.is_local_fs_rootdir:
+      # Configuration needed to support NN HA
+      XmlConfig("hdfs-site.xml",
+            conf_dir=params.ams_collector_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.ams_user,
+            group=params.user_group,
+            mode=0644
+      )
+
+      XmlConfig("hdfs-site.xml",
+            conf_dir=params.hbase_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.ams_user,
+            group=params.user_group,
+            mode=0644
+      )
+
+      XmlConfig("core-site.xml",
+                conf_dir=params.ams_collector_conf_dir,
+                configurations=params.config['configurations']['core-site'],
+                configuration_attributes=params.config['configuration_attributes']['core-site'],
+                owner=params.ams_user,
+                group=params.user_group,
+                mode=0644
+      )
+
+      XmlConfig("core-site.xml",
+                conf_dir=params.hbase_conf_dir,
+                configurations=params.config['configurations']['core-site'],
+                configuration_attributes=params.config['configuration_attributes']['core-site'],
+                owner=params.ams_user,
+                group=params.user_group,
+                mode=0644
+      )
+
+    else:
+      ServiceConfig(params.ams_embedded_hbase_win_service_name,
+                    action="change_user",
+                    username = params.ams_user,
+                    password = Script.get_password(params.ams_user))
+      # create symbolic links to the AMS sink jars so other services can load them
+      links_pairs = [
+        ("%COLLECTOR_HOME%\\hbase\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
+         "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"),
+        ]
+      for link_pair in links_pairs:
+        link, target = link_pair
+        real_link = os.path.expandvars(link)
+        target = compress_backslashes(glob.glob(os.path.expandvars(target))[0])
+        if not os.path.exists(real_link):
+          #TODO check the symlink destination too. Broken in Python 2.x on Windows.
+          Execute('cmd /c mklink "{0}" "{1}"'.format(real_link, target))
+    pass
+
+  elif name == 'monitor':
+    if not check_windows_service_exists(params.ams_monitor_win_service_name):
+      Execute(format("cmd /C cd {ams_monitor_home_dir} & ambari-metrics-monitor.cmd setup"))
+
+    # create symbolic links to the AMS sink jars so other services can load them
+    links_pairs = [
+      ("%HADOOP_HOME%\\share\\hadoop\\common\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
+       "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"),
+      ("%HBASE_HOME%\\lib\\ambari-metrics-hadoop-sink-with-common.jar",
+       "%SINK_HOME%\\hadoop-sink\\ambari-metrics-hadoop-sink-with-common-*.jar"),
+    ]
+    for link_pair in links_pairs:
+      link, target = link_pair
+      real_link = os.path.expandvars(link)
+      target = compress_backslashes(glob.glob(os.path.expandvars(target))[0])
+      if not os.path.exists(real_link):
+        #TODO check the symlink destination too. Broken in Python 2.x on Windows.
+        Execute('cmd /c mklink "{0}" "{1}"'.format(real_link, target))
+
+    Directory(params.ams_monitor_conf_dir,
+              owner=params.ams_user,
+              create_parents=True
+    )
+
+    TemplateConfig(
+      os.path.join(params.ams_monitor_conf_dir, "metric_monitor.ini"),
+      owner=params.ams_user,
+      template_tag=None
+    )
+
+    TemplateConfig(
+      os.path.join(params.ams_monitor_conf_dir, "metric_groups.conf"),
+      owner=params.ams_user,
+      template_tag=None
+    )
+
+    ServiceConfig(params.ams_monitor_win_service_name,
+                  action="change_user",
+                  username = params.ams_user,
+                  password = Script.get_password(params.ams_user))
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def ams(name=None):
+  import params
+
+  if name == 'collector':
+    Directory(params.ams_collector_conf_dir,
+              owner=params.ams_user,
+              group=params.user_group,
+              create_parents=True
+    )
+
+    Execute(('chown', '-R', params.ams_user, params.ams_collector_conf_dir),
+            sudo=True
+            )
+
+    Directory(params.ams_checkpoint_dir,
+              owner=params.ams_user,
+              group=params.user_group,
+              cd_access="a",
+              create_parents=True
+    )
+
+    Execute(('chown', '-R', params.ams_user, params.ams_checkpoint_dir),
+            sudo=True
+            )
+
+    XmlConfig("ams-site.xml",
+              conf_dir=params.ams_collector_conf_dir,
+              configurations=params.config['configurations']['ams-site'],
+              configuration_attributes=params.config['configuration_attributes']['ams-site'],
+              owner=params.ams_user,
+              group=params.user_group
+    )
+
+    merged_ams_hbase_site = {}
+    merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
+    if params.security_enabled:
+      merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])
+
+    # Add phoenix client side overrides
+    merged_ams_hbase_site['phoenix.query.maxGlobalMemoryPercentage'] = str(params.phoenix_max_global_mem_percent)
+    merged_ams_hbase_site['phoenix.spool.directory'] = params.phoenix_client_spool_dir
+
+    XmlConfig( "hbase-site.xml",
+               conf_dir = params.ams_collector_conf_dir,
+               configurations = merged_ams_hbase_site,
+               configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
+               owner = params.ams_user,
+               group = params.user_group
+    )
+
+    if params.security_enabled:
+      TemplateConfig(os.path.join(params.hbase_conf_dir, "ams_collector_jaas.conf"),
+                     owner = params.ams_user,
+                     template_tag = None)
+
+    if params.log4j_props is not None:
+      File(format("{params.ams_collector_conf_dir}/log4j.properties"),
+           mode=0644,
+           group=params.user_group,
+           owner=params.ams_user,
+           content=params.log4j_props
+      )
+
+    File(format("{ams_collector_conf_dir}/ams-env.sh"),
+         owner=params.ams_user,
+         content=InlineTemplate(params.ams_env_sh_template)
+    )
+
+    Directory(params.ams_collector_log_dir,
+              owner=params.ams_user,
+              group=params.user_group,
+              cd_access="a",
+              create_parents=True,
+              mode=0755,
+    )
+
+    Directory(params.ams_collector_pid_dir,
+              owner=params.ams_user,
+              group=params.user_group,
+              cd_access="a",
+              create_parents=True,
+              mode=0755,
+    )
+
+    # Hack to allow native HBase libs to be included for embedded hbase
+    File(os.path.join(params.ams_hbase_home_dir, "bin", "hadoop"),
+         owner=params.ams_user,
+         mode=0755
+    )
+
+    # On some OSes this folder may not exist, so create it before placing files in it
+    Directory(params.limits_conf_dir,
+              create_parents=True,
+              owner='root',
+              group='root'
+    )
+
+    # Setting up security limits
+    File(os.path.join(params.limits_conf_dir, 'ams.conf'),
+         owner='root',
+         group='root',
+         mode=0644,
+         content=Template("ams.conf.j2")
+    )
+
+    # Phoenix spool file dir if not /tmp
+    if not os.path.exists(params.phoenix_client_spool_dir):
+      Directory(params.phoenix_client_spool_dir,
+                owner=params.ams_user,
+                mode = 0755,
+                group=params.user_group,
+                cd_access="a",
+                create_parents=True
+      )
+    pass
+
+    if not params.is_local_fs_rootdir and params.is_ams_distributed:
+      # Configuration needed to support NN HA
+      XmlConfig("hdfs-site.xml",
+            conf_dir=params.ams_collector_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.ams_user,
+            group=params.user_group,
+            mode=0644
+      )
+
+      XmlConfig("hdfs-site.xml",
+            conf_dir=params.hbase_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.ams_user,
+            group=params.user_group,
+            mode=0644
+      )
+
+      XmlConfig("core-site.xml",
+                conf_dir=params.ams_collector_conf_dir,
+                configurations=params.config['configurations']['core-site'],
+                configuration_attributes=params.config['configuration_attributes']['core-site'],
+                owner=params.ams_user,
+                group=params.user_group,
+                mode=0644
+      )
+
+      XmlConfig("core-site.xml",
+                conf_dir=params.hbase_conf_dir,
+                configurations=params.config['configurations']['core-site'],
+                configuration_attributes=params.config['configuration_attributes']['core-site'],
+                owner=params.ams_user,
+                group=params.user_group,
+                mode=0644
+      )
+
+    pass
+
+  elif name == 'monitor':
+    Directory(params.ams_monitor_conf_dir,
+              owner=params.ams_user,
+              group=params.user_group,
+              create_parents=True
+    )
+
+    Directory(params.ams_monitor_log_dir,
+              owner=params.ams_user,
+              group=params.user_group,
+              mode=0755,
+              create_parents=True
+    )
+
+    Directory(params.ams_monitor_pid_dir,
+              owner=params.ams_user,
+              group=params.user_group,
+              mode=0755,
+              create_parents=True
+    )
+
+    Directory(format("{ams_monitor_dir}/psutil/build"),
+              owner=params.ams_user,
+              group=params.user_group,
+              cd_access="a",
+              create_parents=True)
+
+    Execute(format("{sudo} chown -R {ams_user}:{user_group} {ams_monitor_dir}")
+    )
+
+    TemplateConfig(
+      format("{ams_monitor_conf_dir}/metric_monitor.ini"),
+      owner=params.ams_user,
+      group=params.user_group,
+      template_tag=None
+    )
+
+    TemplateConfig(
+      format("{ams_monitor_conf_dir}/metric_groups.conf"),
+      owner=params.ams_user,
+      group=params.user_group,
+      template_tag=None
+    )
+
+    File(format("{ams_monitor_conf_dir}/ams-env.sh"),
+         owner=params.ams_user,
+         content=InlineTemplate(params.ams_env_sh_template)
+    )
+
+    # TODO
+    pass
+
+  pass
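
The resources above (Directory, File, XmlConfig, TemplateConfig) are declarative
wrappers from Ambari's resource_management library. As a rough illustration of the
semantics only (plain Python, not the library itself), a Directory resource with
create_parents=True behaves approximately like:

    import os
    import pwd
    import grp

    def ensure_dir(path, owner, group, mode):
        # mkdir -p semantics: create the whole chain if missing
        if not os.path.isdir(path):
            os.makedirs(path)
        os.chmod(path, mode)   # makedirs honors the umask, so set the mode explicitly
        os.chown(path, pwd.getpwnam(owner).pw_uid, grp.getgrnam(group).gr_gid)

    # hypothetical values mirroring the collector log dir handling above
    ensure_dir("/var/log/ambari-metrics-collector", "ams", "hadoop", 0755)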

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/ams_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/ams_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/ams_service.py
new file mode 100755
index 0000000..0726802
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/ams_service.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from hbase_service import hbase_service
+import os
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def ams_service(name, action):
+  import params
+  if name == 'collector':
+    Service(params.ams_embedded_hbase_win_service_name, action=action)
+    Service(params.ams_collector_win_service_name, action=action)
+  elif name == 'monitor':
+    Service(params.ams_monitor_win_service_name, action=action)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def ams_service(name, action):
+  import params
+
+  if name == 'collector':
+    cmd = format("{ams_collector_script} --config {ams_collector_conf_dir}")
+    pid_file = format("{ams_collector_pid_dir}/ambari-metrics-collector.pid")
+    # no_op_test would need to be considerably more complex to reflect the cumulative
+    # status of the collector; it is omitted because the startup script handles this case as well
+    #no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+
+    if params.is_hbase_distributed:
+      hbase_service('zookeeper', action=action)
+      hbase_service('master', action=action)
+      hbase_service('regionserver', action=action)
+      cmd = format("{cmd} --distributed")
+
+    if action == 'start':
+      Execute(format("{sudo} rm -rf {hbase_tmp_dir}/*.tmp")
+      )
+
+      if not params.is_hbase_distributed and os.path.exists(params.zookeeper_data_dir):
+        Directory(params.zookeeper_data_dir,
+                  action='delete'
+        )
+
+
+      if params.security_enabled:
+        kinit_cmd = format("{kinit_path_local} -kt {ams_collector_keytab_path} {ams_collector_jaas_princ};")
+        daemon_cmd = format("{kinit_cmd} {cmd} start")
+      else:
+        daemon_cmd = format("{cmd} start")
+
+      Execute(daemon_cmd,
+              user=params.ams_user
+      )
+
+      pass
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop")
+      Execute(daemon_cmd,
+              user=params.ams_user
+      )
+
+      pass
+    pass
+  elif name == 'monitor':
+    cmd = format("{ams_monitor_script} --config {ams_monitor_conf_dir}")
+    pid_file = format("{ams_monitor_pid_dir}/ambari-metrics-monitor.pid")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+
+    if action == 'start':
+      daemon_cmd = format("{cmd} start")
+      Execute(daemon_cmd,
+              user=params.ams_user
+      )
+
+      pass
+    elif action == 'stop':
+
+      daemon_cmd = format("{cmd} stop")
+      Execute(daemon_cmd,
+              user=params.ams_user
+      )
+
+      pass
+    pass
+  pass
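
When security is enabled, the collector start command is simply prefixed with a
kinit so the daemon starts with a fresh Kerberos ticket. A minimal sketch of that
command composition, with placeholder paths and principal (not values taken from
this stack):

    kinit_path = "/usr/bin/kinit"
    keytab = "/etc/security/keytabs/ams.collector.keytab"
    principal = "amshbase/host.example.com@EXAMPLE.COM"
    cmd = "/usr/sbin/ambari-metrics-collector --config /etc/ambari-metrics-collector/conf"

    security_enabled = True
    if security_enabled:
        kinit_cmd = "%s -kt %s %s;" % (kinit_path, keytab, principal)
        daemon_cmd = "%s %s start" % (kinit_cmd, cmd)
    else:
        daemon_cmd = "%s start" % cmd
    # daemon_cmd is then run via Execute(daemon_cmd, user=ams_user)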

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/functions.py
new file mode 100755
index 0000000..3252dc1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/functions.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g. "1000m")
+  @param xmn_percent: float (e.g. 0.2)
+  @param xmn_max: int (e.g. 512)
+  """
+  heapsize = int(re.search(r'\d+', str(heapsize_str)).group(0))
+  heapsize_unit = re.search(r'\D+', str(heapsize_str)).group(0)
+
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
+
+def trim_heap_property(property, m_suffix = "m"):
+  if property and property.endswith(m_suffix):
+    property = property[:-1]
+  return property
+
+def check_append_heap_property(property, m_suffix = "m"):
+  if property and not property.endswith(m_suffix):
+    property += m_suffix
+  return property
\ No newline at end of file
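
A worked example of calc_xmn_from_xms: with a "1024m" heap, a 0.2 ratio and a
512 MB cap, floor(1024 * 0.2) = 204, which rounds down to the nearest multiple
of 8 as 200; that is below the cap, so the result is "200m". With a larger heap
the cap wins:

    print calc_xmn_from_xms("1024m", 0.2, 512)   # -> 200m
    print calc_xmn_from_xms("4096m", 0.2, 512)   # floor(819.2) = 819 -> 816, capped -> 512m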

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase.py
new file mode 100755
index 0000000..16d741f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from ambari_commons import OSConst
+from resource_management import *
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hbase(name=None, action = None):
+  import params
+  Directory(params.hbase_conf_dir,
+            owner = params.hadoop_user,
+            create_parents = True
+  )
+  Directory(params.hbase_tmp_dir,
+             create_parents = True,
+             owner = params.hadoop_user
+  )
+
+  Directory (os.path.join(params.local_dir, "jars"),
+             owner = params.hadoop_user,
+             create_parents = True
+  )
+
+  XmlConfig("hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['ams-hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
+            owner = params.hadoop_user
+  )
+
+  if 'ams-hbase-policy' in params.config['configurations']:
+    XmlConfig("hbase-policy.xml",
+              conf_dir = params.hbase_conf_dir,
+              configurations = params.config['configurations']['ams-hbase-policy'],
+              configuration_attributes=params.config['configuration_attributes']['ams-hbase-policy'],
+              owner = params.hadoop_user
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else:
+    File(os.path.join(params.hbase_conf_dir, "hbase-policy.xml"),
+          owner = params.hadoop_user
+    )
+
+  # Metrics properties
+  File(os.path.join(params.hbase_conf_dir, "hadoop-metrics2-hbase.properties"),
+       owner = params.hbase_user,
+       content=Template("hadoop-metrics2-hbase.properties.j2")
+  )
+
+  hbase_TemplateConfig('regionservers', user=params.hadoop_user)
+
+  if params.security_enabled:
+    hbase_TemplateConfig(format("hbase_{name}_jaas.conf"), user=params.hadoop_user)
+
+  if name != "client":
+    Directory (params.hbase_log_dir,
+               owner = params.hadoop_user,
+               create_parents = True
+    )
+
+  if params.hbase_log4j_props is not None:
+    File(os.path.join(params.hbase_conf_dir, "log4j.properties"),
+         owner=params.hadoop_user,
+         content=params.hbase_log4j_props
+    )
+  elif os.path.exists(os.path.join(params.hbase_conf_dir, "log4j.properties")):
+    File(os.path.join(params.hbase_conf_dir,"log4j.properties"),
+         owner=params.hadoop_user
+    )
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hbase(name=None,  # 'master', 'regionserver', or 'client'
+          action=None):
+  import params
+
+  Directory(params.hbase_conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      create_parents = True
+  )
+
+  Execute(('chown', '-R', params.hbase_user, params.hbase_conf_dir),
+          sudo=True
+          )
+
+  Directory (params.hbase_tmp_dir,
+             owner = params.hbase_user,
+             cd_access="a",
+             create_parents = True
+  )
+
+  Execute(('chown', '-R', params.hbase_user, params.hbase_tmp_dir),
+          sudo=True
+          )
+
+  Directory (os.path.join(params.local_dir, "jars"),
+             owner = params.hbase_user,
+             group = params.user_group,
+             cd_access="a",
+             mode=0775,
+             create_parents = True
+  )
+
+  merged_ams_hbase_site = {}
+  merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
+  if params.security_enabled:
+    merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])
+
+  XmlConfig("hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = merged_ams_hbase_site,
+            configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  # Phoenix spool file dir if not /tmp
+  if not os.path.exists(params.phoenix_server_spool_dir):
+    Directory(params.phoenix_server_spool_dir,
+              owner=params.ams_user,
+              mode = 0755,
+              group=params.user_group,
+              cd_access="a",
+              create_parents=True
+    )
+  pass
+
+  if 'ams-hbase-policy' in params.config['configurations']:
+    XmlConfig("hbase-policy.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['ams-hbase-policy'],
+            configuration_attributes=params.config['configuration_attributes']['ams-hbase-policy'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else:
+    File( format("{params.hbase_conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+
+  File(format("{hbase_conf_dir}/hbase-env.sh"),
+       owner = params.hbase_user,
+       content=InlineTemplate(params.hbase_env_sh_template)
+  )
+
+  # Metrics properties
+  File(os.path.join(params.hbase_conf_dir, "hadoop-metrics2-hbase.properties"),
+         owner = params.hbase_user,
+         group = params.user_group,
+         content=Template("hadoop-metrics2-hbase.properties.j2")
+    )
+
+  # hbase_TemplateConfig( params.metric_prop_file_name,
+  #   tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
+  # )
+
+  hbase_TemplateConfig('regionservers', user=params.hbase_user)
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{name}_jaas.conf"), user=params.hbase_user)
+    hbase_TemplateConfig( format("hbase_client_jaas.conf"), user=params.hbase_user)
+    hbase_TemplateConfig( format("ams_zookeeper_jaas.conf"), user=params.hbase_user)
+
+  if name != "client":
+    Directory( params.hbase_pid_dir,
+               owner = params.hbase_user,
+               create_parents = True,
+               cd_access = "a",
+               mode = 0755,
+    )
+
+    Directory (params.hbase_log_dir,
+               owner = params.hbase_user,
+               create_parents = True,
+               cd_access = "a",
+               mode = 0755,
+    )
+
+  if name == "master":
+
+    if not params.is_local_fs_rootdir:
+      # If executing Stop All, HDFS is probably down
+      if action != 'stop':
+
+        params.HdfsResource(params.hbase_root_dir,
+                             type="directory",
+                             action="create_on_execute",
+                             owner=params.hbase_user,
+                             mode=0775
+        )
+
+        params.HdfsResource(params.hbase_staging_dir,
+                             type="directory",
+                             action="create_on_execute",
+                             owner=params.hbase_user,
+                             mode=0711
+        )
+
+        params.HdfsResource(None, action="execute")
+
+      if params.is_hbase_distributed:
+        # Workaround for status commands that are not aware of the operating mode
+        File(format("{params.hbase_pid_dir}/distributed_mode"), action="create", mode=0644, owner=params.hbase_user)
+
+      pass
+
+    else:
+
+      local_root_dir = params.hbase_root_dir
+      # strip the "file://" protocol prefix; otherwise assume the directory name is given as-is
+      if local_root_dir.startswith("file://"):
+        local_root_dir = local_root_dir[7:]
+
+      Directory(local_root_dir,
+                owner = params.hbase_user,
+                cd_access="a",
+                create_parents = True
+      )
+
+      Execute(('chown', '-R', params.hbase_user, local_root_dir),
+              sudo=True
+              )
+
+      File(format("{params.hbase_pid_dir}/distributed_mode"), action="delete", owner=params.hbase_user)
+
+  if params.hbase_log4j_props is not None:
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hbase_user,
+         content=params.hbase_log4j_props
+    )
+  elif os.path.exists(format("{params.hbase_conf_dir}/log4j.properties")):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hbase_user
+    )
+
+def hbase_TemplateConfig(name, tag=None, user=None):
+  import params
+
+  TemplateConfig( os.path.join(params.hbase_conf_dir, name),
+      owner = user,
+      template_tag = tag
+  )
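
The hbase-site.xml written above is produced by overlaying ams-hbase-security-site
onto ams-hbase-site with plain dict updates, so security properties win on key
collisions. A small self-contained sketch of that merge (the property values here
are illustrative only):

    base_site = {"hbase.cluster.distributed": "true",
                 "hbase.zookeeper.property.clientPort": "61181"}
    security_site = {"hbase.security.authentication": "kerberos",
                     "hbase.zookeeper.property.clientPort": "2181"}

    merged = {}
    merged.update(base_site)
    merged.update(security_site)   # the later update wins
    print merged["hbase.zookeeper.property.clientPort"]   # -> 2181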

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_master.py
new file mode 100755
index 0000000..74f7326
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_master.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+
+
+class HbaseMaster(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, action = None):
+    import params
+    env.set_params(params)
+
+    hbase('master', action)
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env, action = 'start') # for security
+
+    hbase_service( 'master',
+      action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'master',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-master.pid")
+    check_process_status(pid_file)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_decommission(env)
+
+
+if __name__ == "__main__":
+  HbaseMaster().execute()
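
The status() method above relies on check_process_status reading a pid file. As a
plain-Python illustration of that liveness check (this is not the
resource_management helper itself):

    import os

    def process_alive(pid_file):
        if not os.path.isfile(pid_file):
            return False
        try:
            pid = int(open(pid_file).read().strip())
            os.kill(pid, 0)   # signal 0 probes existence without delivering anything
            return True
        except (ValueError, OSError):
            return False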

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_regionserver.py
new file mode 100755
index 0000000..a5121b8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_regionserver.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+
+
+class HbaseRegionServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, action = None):
+    import params
+    env.set_params(params)
+
+    hbase('regionserver', action)
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env, action = 'start') # for security
+
+    hbase_service( 'regionserver',
+      action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
+    check_process_status(pid_file)
+
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+
+
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_service.py
new file mode 100755
index 0000000..3b542cb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_service.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hbase_service(
+  name,
+  action = 'start'): # 'start' or 'stop' or 'status'
+
+    import params
+
+    role = name
+    cmd = format("{daemon_script} --config {hbase_conf_dir}")
+    pid_file = format("{hbase_pid_dir}/hbase-{hbase_user}-{role}.pid")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+
+      Execute ( daemon_cmd,
+        not_if = no_op_test,
+        user = params.hbase_user
+      )
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role}")
+
+      Execute ( daemon_cmd,
+        user = params.hbase_user,
+        # BUGFIX: the HBase regionserver sometimes hangs when the NameNode is in safe mode
+        timeout = 30,
+        on_timeout = format("{no_op_test} && kill -9 `cat {pid_file}`")
+      )
+
+      File(pid_file,
+        action = "delete",
+      )
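
Two idioms above are worth calling out. The no_op_test shell snippet succeeds only
when the pid file exists and a process with that pid is alive, so Execute's not_if
skips a start when the daemon is already running; on stop, timeout/on_timeout fall
back to kill -9 if the graceful stop hangs. A condensed sketch of the same pattern
with placeholder paths, assuming the resource_management DSL used by these scripts:

    from resource_management import *

    pid_file = "/var/run/ams-hbase/hbase-ams-master.pid"   # illustrative path
    no_op_test = "ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1" % (pid_file, pid_file)

    Execute("hbase-daemon.sh --config /etc/ams-hbase/conf start master",
            not_if=no_op_test,   # skip if already running
            user="ams")

    Execute("hbase-daemon.sh --config /etc/ams-hbase/conf stop master",
            user="ams",
            timeout=30,
            on_timeout="%s && kill -9 `cat %s`" % (no_op_test, pid_file))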

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/metrics_collector.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/metrics_collector.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/metrics_collector.py
new file mode 100755
index 0000000..cf498ec
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/metrics_collector.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from ams import ams
+from ams_service import ams_service
+from hbase import hbase
+from status import check_service_status
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+class AmsCollector(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, action = None):
+    import params
+    env.set_params(params)
+    hbase('master', action)
+    hbase('regionserver', action)
+    ams(name='collector')
+
+  def start(self, env):
+    self.configure(env, action = 'start') # for security
+    # stop any hung components before starting
+    ams_service('collector', action = 'stop')
+    ams_service('collector', action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    # stop() may be called before start() when restart() is initiated right after installation
+    self.configure(env, action = 'stop') # for security
+    ams_service('collector', action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_service_status(name='collector')
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class AmsCollectorDefault(AmsCollector):
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    props_value_check = {"hbase.security.authentication": "kerberos",
+                         "hbase.security.authorization": "true"}
+
+    props_empty_check = ["hbase.zookeeper.property.authProvider.1",
+                         "hbase.master.keytab.file",
+                         "hbase.master.kerberos.principal",
+                         "hbase.regionserver.keytab.file",
+                         "hbase.regionserver.kerberos.principal"
+                         ]
+    props_read_check = ['hbase.master.keytab.file', 'hbase.regionserver.keytab.file']
+    ams_hbase_site_expectations = build_expectations('hbase-site', props_value_check,
+                                                     props_empty_check,
+                                                     props_read_check)
+
+    expectations = {}
+    expectations.update(ams_hbase_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.ams_hbase_conf_dir,
+                                                 {'hbase-site.xml': FILE_TYPE_XML})
+
+    # the value read back from hbase-site.xml on disk may be a string, so normalize it before testing
+    is_hbase_distributed = str(security_params['hbase-site']['hbase.cluster.distributed']).lower() == 'true'
+    # In embedded mode, when HBase is backed by the local filesystem, the security state
+    # is SECURED_KERBEROS by definition once the cluster is secured
+    if status_params.security_enabled and not is_hbase_distributed:
+      self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+      return
+
+    result_issues = validate_security_config_properties(security_params, expectations)
+
+    if not result_issues:  # If all validations passed successfully
+      try:
+        # Double check the dict before calling execute
+        if ('hbase-site' not in security_params or
+                'hbase.master.keytab.file' not in security_params['hbase-site'] or
+                'hbase.master.kerberos.principal' not in security_params['hbase-site']):
+          self.put_structured_out({"securityState": "UNSECURED"})
+          self.put_structured_out(
+            {"securityIssuesFound": "Keytab file or principal are not set property."})
+          return
+
+        cached_kinit_executor(status_params.kinit_path_local,
+                              status_params.hbase_user,
+                              security_params['hbase-site']['hbase.master.keytab.file'],
+                              security_params['hbase-site']['hbase.master.kerberos.principal'],
+                              status_params.hostname,
+                              status_params.tmp_dir)
+        self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+      except Exception as e:
+        self.put_structured_out({"securityState": "ERROR"})
+        self.put_structured_out({"securityStateErrorInfo": str(e)})
+    else:
+      issues = []
+      for cf in result_issues:
+        issues.append("Configuration file %s did not pass the validation. Reason: %s" % (
+          cf, result_issues[cf]))
+      self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class AmsCollectorWindows(AmsCollector):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env) # for security
+
+if __name__ == "__main__":
+  AmsCollector().execute()
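
security_status above delegates to build_expectations and
validate_security_config_properties: exact values are compared, required
properties must be non-empty, and keytab paths must be readable. A plain-Python
sketch of the idea behind those helpers (the function below is hypothetical, not
the library code):

    import os

    def validate(site, value_check, empty_check, read_check):
        issues = []
        for prop, expected in value_check.items():
            if site.get(prop) != expected:
                issues.append("%s should be %s" % (prop, expected))
        for prop in empty_check:
            if not site.get(prop):
                issues.append("%s must not be empty" % prop)
        for path_prop in read_check:
            if site.get(path_prop) and not os.access(site[path_prop], os.R_OK):
                issues.append("%s is not readable" % site[path_prop])
        return issues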

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/metrics_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/metrics_monitor.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/metrics_monitor.py
new file mode 100755
index 0000000..38da85f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/metrics_monitor.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from ams import ams
+from ams_service import ams_service
+from status import check_service_status
+
+class AmsMonitor(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env) # for security
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    ams(name='monitor')
+
+  def start(self, env):
+    self.configure(env) # for security
+
+    ams_service( 'monitor',
+                 action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    ams_service( 'monitor',
+                 action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_service_status(name='monitor')
+
+
+if __name__ == "__main__":
+  AmsMonitor().execute()
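
Every script here ends with <Class>().execute(); the resource_management Script
base class dispatches the command it receives from the agent (install, start,
stop, status, ...) to the method of the same name. A toy sketch of that dispatch
idea (simplified, not the real Script class):

    class ToyScript(object):
        def execute(self, command):
            # the real Script reads the command name from the agent's JSON payload
            getattr(self, command)()

    class ToyMonitor(ToyScript):
        def start(self):
            print "starting"
        def stop(self):
            print "stopping"

    ToyMonitor().execute("start")   # -> starting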

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/params.py
new file mode 100755
index 0000000..ba07c74
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/params.py
@@ -0,0 +1,254 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from functions import calc_xmn_from_xms
+from functions import check_append_heap_property
+from functions import trim_heap_property
+
+from resource_management import *
+import status_params
+from ambari_commons import OSCheck
+
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+# server configurations
+config = Script.get_config()
+exec_tmp_dir = Script.get_tmp_dir()
+
+def get_combined_memory_mb(value1, value2):
+  try:
+    part1 = int(value1.strip()[:-1]) if value1.lower().strip()[-1:] == 'm' else int(value1)
+    part2 = int(value2.strip()[:-1]) if value2.lower().strip()[-1:] == 'm' else int(value2)
+    return str(part1 + part2) + 'm'
+  except (AttributeError, TypeError, ValueError):  # malformed or missing heap size values
+    return None
+pass
+
+#AMBARI_METRICS data
+ams_pid_dir = status_params.ams_collector_pid_dir
+
+ams_collector_script = "/usr/sbin/ambari-metrics-collector"
+ams_collector_pid_dir = status_params.ams_collector_pid_dir
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+if 'cluster-env' in config['configurations'] and \
+    'metrics_collector_vip_host' in config['configurations']['cluster-env']:
+  metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
+else:
+  metric_collector_host = ams_collector_hosts[0]
+if 'cluster-env' in config['configurations'] and \
+    'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+  metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+else:
+  metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+  if metric_collector_web_address.find(':') != -1:
+    metric_collector_port = metric_collector_web_address.split(':')[1]
+  else:
+    metric_collector_port = '6188'
+
+ams_collector_log_dir = config['configurations']['ams-env']['metrics_collector_log_dir']
+ams_monitor_log_dir = config['configurations']['ams-env']['metrics_monitor_log_dir']
+
+ams_monitor_dir = "/usr/lib/python2.6/site-packages/resource_monitoring"
+ams_monitor_pid_dir = status_params.ams_monitor_pid_dir
+ams_monitor_script = "/usr/sbin/ambari-metrics-monitor"
+
+ams_hbase_home_dir = "/usr/lib/ams-hbase/"
+
+ams_hbase_normalizer_enabled = default("/configurations/ams-hbase-site/hbase.normalizer.enabled", None)
+ams_hbase_fifo_compaction_enabled = default("/configurations/ams-site/timeline.metrics.hbase.fifo.compaction.enabled", None)
+
+#hadoop params
+
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = config['commandParams']['mark_draining_only']
+hbase_included_hosts = config['commandParams']['included_hosts']
+
+hbase_user = status_params.hbase_user
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+hbase_root_dir = config['configurations']['ams-hbase-site']['hbase.rootdir']
+hbase_pid_dir = status_params.hbase_pid_dir
+
+is_hbase_distributed = config['configurations']['ams-hbase-site']['hbase.cluster.distributed']
+is_local_fs_rootdir = hbase_root_dir.startswith('file://')
+is_ams_distributed = config['configurations']['ams-site']['timeline.metrics.service.operation.mode'] == 'distributed'
+
+# Security is disabled in embedded mode, when HBase is backed by the local filesystem
+security_enabled = False if not is_hbase_distributed else config['configurations']['cluster-env']['security_enabled']
+
+# this is "hadoop-metrics.properties" for 1.x stacks
+metric_prop_file_name = "hadoop-metrics2-hbase.properties"
+
+# a 32-bit JDK is not supported.
+java64_home = config['hostLevelParams']['java_home']
+java_version = int(config['hostLevelParams']['java_version'])
+
+metrics_collector_heapsize = default('/configurations/ams-env/metrics_collector_heapsize', "512")
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
+
+hbase_log_dir = config['configurations']['ams-hbase-env']['hbase_log_dir']
+hbase_classpath_additional = default("/configurations/ams-hbase-env/hbase_classpath_additional", None)
+master_heapsize = config['configurations']['ams-hbase-env']['hbase_master_heapsize']
+regionserver_heapsize = config['configurations']['ams-hbase-env']['hbase_regionserver_heapsize']
+
+# Append "m" to the heap size values if the unit suffix is not already present.
+metrics_collector_heapsize = check_append_heap_property(str(metrics_collector_heapsize), "m")
+master_heapsize = check_append_heap_property(str(master_heapsize), "m")
+regionserver_heapsize = check_append_heap_property(str(regionserver_heapsize), "m")
+
+regionserver_xmn_max = default('/configurations/ams-hbase-env/hbase_regionserver_xmn_max', None)
+if regionserver_xmn_max:
+  regionserver_xmn_max = int(trim_heap_property(str(regionserver_xmn_max), "m"))
+  regionserver_xmn_percent = config['configurations']['ams-hbase-env']['hbase_regionserver_xmn_ratio']
+  regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
+else:
+  regionserver_xmn_size = config['configurations']['ams-hbase-env']['regionserver_xmn_size']
+pass
+
+hbase_master_xmn_size = config['configurations']['ams-hbase-env']['hbase_master_xmn_size']
+hbase_master_maxperm_size = config['configurations']['ams-hbase-env']['hbase_master_maxperm_size']
+
+# Append "m" to the heap size values if the unit suffix is not already present.
+hbase_master_maxperm_size = check_append_heap_property(str(hbase_master_maxperm_size), "m")
+hbase_master_xmn_size = check_append_heap_property(str(hbase_master_xmn_size), "m")
+regionserver_xmn_size = check_append_heap_property(str(regionserver_xmn_size), "m")
+
+# Choose heap size for embedded mode as sum of master + regionserver
+if not is_hbase_distributed:
+  hbase_heapsize = get_combined_memory_mb(master_heapsize, regionserver_heapsize)
+  if hbase_heapsize is None:
+    hbase_heapsize = master_heapsize
+else:
+  hbase_heapsize = master_heapsize
+
+max_open_files_limit = default("/configurations/ams-hbase-env/max_open_files_limit", "32768")
+
+if not is_hbase_distributed:
+  zookeeper_quorum_hosts = 'localhost'
+  zookeeper_clientPort = '61181'
+else:
+  zookeeper_quorum_hosts = default("/hostname", 'localhost')
+  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
+    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
+  else:
+    zookeeper_clientPort = '2181'
+
+ams_checkpoint_dir = config['configurations']['ams-site']['timeline.metrics.aggregator.checkpoint.dir']
+hbase_pid_dir = status_params.hbase_pid_dir
+_hbase_tmp_dir = config['configurations']['ams-hbase-site']['hbase.tmp.dir']
+hbase_tmp_dir = substitute_vars(_hbase_tmp_dir, config['configurations']['ams-hbase-site'])
+_zookeeper_data_dir = config['configurations']['ams-hbase-site']['hbase.zookeeper.property.dataDir']
+zookeeper_data_dir = substitute_vars(_zookeeper_data_dir, config['configurations']['ams-hbase-site'])
+# TODO UPGRADE default, update site during upgrade
+_local_dir_conf = default('/configurations/ams-hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
+local_dir = substitute_vars(_local_dir_conf, config['configurations']['ams-hbase-site'])
+
+phoenix_max_global_mem_percent = default('/configurations/ams-site/phoenix.query.maxGlobalMemoryPercentage', '20')
+phoenix_client_spool_dir = default('/configurations/ams-site/phoenix.spool.directory', '/tmp')
+phoenix_server_spool_dir = default('/configurations/ams-hbase-site/phoenix.spool.directory', '/tmp')
+# Substitute vars if present
+phoenix_client_spool_dir = substitute_vars(phoenix_client_spool_dir, config['configurations']['ams-hbase-site'])
+phoenix_server_spool_dir = substitute_vars(phoenix_server_spool_dir, config['configurations']['ams-hbase-site'])
+
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
+
+rs_hosts = ["localhost"]
+
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
+smokeuser_permissions = "RWXCA"
+service_check_data = functions.get_unique_id_and_date()
+user_group = config['configurations']['cluster-env']["user_group"]
+hadoop_user = "hadoop"
+
+kinit_cmd = ""
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+  smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+  hbase_user_keytab = config['configurations']['ams-hbase-env']['hbase_user_keytab']
+
+  ams_collector_jaas_config_file = format("{hbase_conf_dir}/ams_collector_jaas.conf")
+  ams_collector_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.myclient.keytab']
+  ams_collector_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.myclient.principal'].replace('_HOST',_hostname_lowercase)
+
+  ams_zookeeper_jaas_config_file = format("{hbase_conf_dir}/ams_zookeeper_jaas.conf")
+  ams_zookeeper_keytab = config['configurations']['ams-hbase-security-site']['ams.zookeeper.keytab']
+  ams_zookeeper_principal_name = config['configurations']['ams-hbase-security-site']['ams.zookeeper.principal'].replace('_HOST',_hostname_lowercase)
+
+  master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+  master_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.master.keytab.file']
+  master_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+
+  regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
+  regionserver_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.regionserver.keytab.file']
+  regionserver_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+
+  zk_servicename = ams_zookeeper_principal_name.rpartition('/')[0]
+
+#log4j.properties
+if (('ams-hbase-log4j' in config['configurations']) and ('content' in config['configurations']['ams-hbase-log4j'])):
+  hbase_log4j_props = config['configurations']['ams-hbase-log4j']['content']
+else:
+  hbase_log4j_props = None
+
+if (('ams-log4j' in config['configurations']) and ('content' in config['configurations']['ams-log4j'])):
+  log4j_props = config['configurations']['ams-log4j']['content']
+else:
+  log4j_props = None
+
+hbase_env_sh_template = config['configurations']['ams-hbase-env']['content']
+ams_env_sh_template = config['configurations']['ams-env']['content']
+
+hbase_staging_dir = default("/configurations/ams-hbase-site/hbase.bulkload.staging.dir", "/amshbase/staging")
+
+#for create_hdfs_directory
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+ )
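
Two helpers in this params.py deserve a note. get_combined_memory_mb tolerates a
missing "m" suffix on either operand, so get_combined_memory_mb("512m", "512")
returns "1024m". And the functools.partial at the end bakes the common HDFS
arguments into HdfsResource once, so call sites such as the ones in hbase.py only
pass what varies. A self-contained sketch of that pattern (the function and the
values are illustrative, not the real HdfsResource):

    import functools

    def hdfs_resource(path, type=None, action=None, owner=None,
                      user=None, security_enabled=False):
        print "HdfsResource %s %s as user=%s owner=%s" % (action, path, user, owner)

    # close over the arguments that never change per call site
    HdfsResourceDemo = functools.partial(hdfs_resource,
                                         user="hdfs",
                                         security_enabled=False)

    HdfsResourceDemo("/amshbase/staging", type="directory",
                     action="create_on_execute", owner="ams")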

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/params_linux.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/params_linux.py
new file mode 100755
index 0000000..838e987
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/params_linux.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from ambari_commons import OSCheck
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+config = Script.get_config()
+
+ams_collector_conf_dir = "/etc/ambari-metrics-collector/conf"
+ams_monitor_conf_dir = "/etc/ambari-metrics-monitor/conf/"
+ams_user = config['configurations']['ams-env']['ambari_metrics_user']
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+
+hadoop_native_lib = format("/usr/lib/ams-hbase/lib/hadoop-native")
+hadoop_bin_dir = "/usr/bin"
+daemon_script = "/usr/lib/ams-hbase/bin/hbase-daemon.sh"
+region_mover = "/usr/lib/ams-hbase/bin/region_mover.rb"
+region_drainer = "/usr/lib/ams-hbase/bin/draining_servers.rb"
+hbase_cmd = "/usr/lib/ams-hbase/bin/hbase"
+
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hbase_conf_dir = "/etc/ams-hbase/conf"
+
+limits_conf_dir = "/etc/security/limits.d"
+sudo = AMBARI_SUDO_BINARY

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/params_windows.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/params_windows.py
new file mode 100755
index 0000000..acb5bba
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/params_windows.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.script.script import Script
+
+
+config = Script.get_config()
+
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+ams_user = hadoop_user
+
+try:
+  ams_collector_conf_dir = os.environ["COLLECTOR_CONF_DIR"]
+  ams_collector_home_dir = os.environ["COLLECTOR_HOME"]
+  hbase_cmd = os.path.join(os.environ["COLLECTOR_HOME"], "hbase", "bin", "hbase.cmd")
+  hbase_conf_dir = os.path.join(os.environ["COLLECTOR_HOME"], "hbase", "conf")
+except KeyError:
+  ams_collector_conf_dir = None
+  ams_collector_home_dir = None
+  hbase_cmd = None
+  hbase_conf_dir = None
+
+try:
+  ams_monitor_conf_dir = os.environ["MONITOR_CONF_DIR"]
+  ams_monitor_home_dir = os.environ["MONITOR_HOME"]
+except KeyError:
+  ams_monitor_conf_dir = None
+  ams_monitor_home_dir = None
+
+hadoop_native_lib = os.path.join(os.environ["HADOOP_HOME"], "bin")
+hadoop_bin_dir = os.path.join(os.environ["HADOOP_HOME"], "bin")
+hadoop_conf_dir = os.path.join(os.environ["HADOOP_HOME"], "conf")
+
+from service_mapping import *
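
The try/except blocks above fall back to None when the collector or monitor
environment variables are absent on a host. An equivalent, more compact sketch
using os.environ.get, shown for comparison rather than as a change to the stack:

    import os

    collector_home = os.environ.get("COLLECTOR_HOME")
    ams_collector_conf_dir = os.environ.get("COLLECTOR_CONF_DIR")
    hbase_cmd = (os.path.join(collector_home, "hbase", "bin", "hbase.cmd")
                 if collector_home else None)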

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/service_check.py
new file mode 100755
index 0000000..668db55
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/service_check.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.logger import Logger
+from resource_management.core.base import Fail
+from resource_management import Script
+from resource_management import Template
+
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+import httplib
+import urllib
+import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and provides the same set of functions.
+import os
+import random
+import time
+import socket
+
+
+class AMSServiceCheck(Script):
+  AMS_METRICS_POST_URL = "/ws/v1/timeline/metrics/"
+  AMS_METRICS_GET_URL = "/ws/v1/timeline/metrics?%s"
+  AMS_CONNECT_TRIES = 30
+  AMS_CONNECT_TIMEOUT = 15
+
+  @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+  def service_check(self, env):
+    from resource_management.libraries.functions.windows_service_utils import check_windows_service_exists
+    import params
+
+    env.set_params(params)
+
+    #Just check that the services were correctly installed
+    #Check the monitor on all hosts
+    Logger.info("Metrics Monitor service check was started.")
+    if not check_windows_service_exists(params.ams_monitor_win_service_name):
+      raise Fail("Metrics Monitor service was not properly installed. Check the logs and retry the installation.")
+    #Check the collector only where installed
+    if params.ams_collector_home_dir and os.path.isdir(params.ams_collector_home_dir):
+      Logger.info("Metrics Collector service check was started.")
+      if not check_windows_service_exists(params.ams_collector_win_service_name):
+        raise Fail("Metrics Collector service was not properly installed. Check the logs and retry the installation.")
+
+  @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+  def service_check(self, env):
+    import params
+
+    Logger.info("Ambari Metrics service check was started.")
+    env.set_params(params)
+
+    random_value1 = random.random()
+    headers = {"Content-type": "application/json"}
+
+    for i in xrange(0, self.AMS_CONNECT_TRIES):
+      try:
+        current_time = int(time.time()) * 1000
+        metric_json = Template('smoketest_metrics.json.j2', hostname=params.hostname, random1=random_value1,
+                               current_time=current_time).get_content()
+        Logger.info("Generated metrics:\n%s" % metric_json)
+
+        Logger.info("Connecting (POST) to %s:%s%s" % (params.metric_collector_host,
+                                                      params.metric_collector_port,
+                                                      self.AMS_METRICS_POST_URL))
+        conn = httplib.HTTPConnection(params.metric_collector_host,
+                                        int(params.metric_collector_port))
+        conn.request("POST", self.AMS_METRICS_POST_URL, metric_json, headers)
+
+        response = conn.getresponse()
+        Logger.info("Http response: %s %s" % (response.status, response.reason))
+      except (httplib.HTTPException, socket.error) as ex:
+        if i < self.AMS_CONNECT_TRIES - 1:  #range/xrange returns items from start to end-1
+          time.sleep(self.AMS_CONNECT_TIMEOUT)
+          Logger.info("Connection failed. Next retry in %s seconds."
+                      % (self.AMS_CONNECT_TIMEOUT))
+          continue
+        else:
+          raise Fail("Metrics were not saved. Service check has failed. "
+               "\nConnection failed.")
+
+      data = response.read()
+      Logger.info("Http data: %s" % data)
+      conn.close()
+
+      if response.status == 200:
+        Logger.info("Metrics were saved.")
+        break
+      else:
+        Logger.info("Metrics were not saved. Service check has failed.")
+        if i < self.AMS_CONNECT_TRIES - 1:  #range/xrange returns items from start to end-1
+          time.sleep(self.AMS_CONNECT_TIMEOUT)
+          Logger.info("Next retry in %s seconds."
+                      % (self.AMS_CONNECT_TIMEOUT))
+        else:
+          raise Fail("Metrics were not saved. Service check has failed. POST request status: %s %s \n%s" %
+                     (response.status, response.reason, data))
+
+    get_metrics_parameters = {
+      "metricNames": "AMBARI_METRICS.SmokeTest.FakeMetric",
+      "appId": "amssmoketestfake",
+      "hostname": params.hostname,
+      "startTime": current_time - 60000,
+      "endTime": current_time + 61000,
+      "precision": "seconds",
+      "grouped": "false",
+    }
+    encoded_get_metrics_parameters = urllib.urlencode(get_metrics_parameters)
+
+    Logger.info("Connecting (GET) to %s:%s%s" % (params.metric_collector_host,
+                                                 params.metric_collector_port,
+                                              self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters))
+
+    conn = httplib.HTTPConnection(params.metric_collector_host,
+                                  int(params.metric_collector_port))
+    conn.request("GET", self.AMS_METRICS_GET_URL % encoded_get_metrics_parameters)
+    response = conn.getresponse()
+    Logger.info("Http response: %s %s" % (response.status, response.reason))
+
+    data = response.read()
+    Logger.info("Http data: %s" % data)
+    conn.close()
+
+    if response.status == 200:
+      Logger.info("Metrics were retrieved.")
+    else:
+      Logger.info("Metrics were not retrieved. Service check has failed.")
+      raise Fail("Metrics were not retrieved. Service check has failed. GET request status: %s %s \n%s" %
+                 (response.status, response.reason, data))
+    data_json = json.loads(data)
+
+    def floats_eq(f1, f2, delta):
+      return abs(f1-f2) < delta
+
+    for metrics_data in data_json["metrics"]:
+      if (str(current_time) in metrics_data["metrics"] and str(current_time + 1000) in metrics_data["metrics"]
+          and floats_eq(metrics_data["metrics"][str(current_time)], random_value1, 0.0000001)
+          and floats_eq(metrics_data["metrics"][str(current_time + 1000)], current_time, 1)):
+        Logger.info("Values %s and %s were found in the response." % (random_value1, current_time))
+        break
+      pass
+    else:
+      Logger.info("Values %s and %s were not found in the response." % (random_value1, current_time))
+      raise Fail("Values %s and %s were not found in the response." % (random_value1, current_time))
+
+    Logger.info("Ambari Metrics service check is finished.")
+
+if __name__ == "__main__":
+  AMSServiceCheck().execute()

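A note on the smoke test above: it exercises the collector's timeline endpoints directly over HTTP, so the same GET query can be reproduced by hand when a check fails. A minimal sketch, assuming the collector answers on localhost:6188 and a smoke-test metric was already posted for the given host (the host, port and hostname below are placeholders; the real values come from params):

    import httplib, urllib, json, time

    collector_host = "localhost"                 # placeholder for metric_collector_host
    collector_port = 6188                        # placeholder for metric_collector_port
    now = int(time.time()) * 1000

    query = urllib.urlencode({
      "metricNames": "AMBARI_METRICS.SmokeTest.FakeMetric",
      "appId": "amssmoketestfake",
      "hostname": "example-host.example.com",    # placeholder: host that posted the metric
      "startTime": now - 60000,
      "endTime": now + 61000,
      "precision": "seconds",
      "grouped": "false",
    })

    conn = httplib.HTTPConnection(collector_host, collector_port)
    conn.request("GET", "/ws/v1/timeline/metrics?%s" % query)
    response = conn.getresponse()
    print response.status, response.reason
    print json.loads(response.read())
    conn.close()
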
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/service_mapping.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/service_mapping.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/service_mapping.py
new file mode 100755
index 0000000..2eeb427
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/service_mapping.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+ams_collector_win_service_name = "AmbariMetricsCollector"
+ams_monitor_win_service_name = "AmbariMetricsHostMonitoring"
+ams_embedded_hbase_win_service_name = "ams_hbase_master"
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/split_points.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/split_points.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/split_points.py
new file mode 100755
index 0000000..fa4deaf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/split_points.py
@@ -0,0 +1,236 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import sys
+import re
+import math
+import collections
+import ast
+
+metric_filename_ext = '.txt'
+# Regions reserved for the higher order aggregate tables
+other_region_static_count = 6
+# Max equidistant points to return per service
+max_equidistant_points = 50
+
+b_bytes = 1
+k_bytes = 1 << 10  # 1024
+m_bytes = 1 << 20  # 1024^2
+g_bytes = 1 << 30  # 1024^3
+t_bytes = 1 << 40  # 1024^4
+p_bytes = 1 << 50  # 1024^5
+
+def to_number(s):
+  try:
+    return int(re.sub("\D", "", s))
+  except ValueError:
+    return None
+
+def format_Xmx_size_to_bytes(value, default='b'):
+  strvalue = str(value).lower()
+  if len(strvalue) == 0:
+    return 0
+  modifier = strvalue[-1]
+
+  if modifier == ' ' or modifier in "0123456789":
+    modifier = default
+
+  m = {
+    modifier == 'b': b_bytes,
+    modifier == 'k': k_bytes,
+    modifier == 'm': m_bytes,
+    modifier == 'g': g_bytes,
+    modifier == 't': t_bytes,
+    modifier == 'p': p_bytes
+    } [1]
+  return to_number(strvalue) * m
+
+# Class that takes the AMS HBase configs as input and determines the Region
+# pre-splits based on the selected services, which are also passed in as a parameter.
+class FindSplitPointsForAMSRegions():
+
+  def __init__(self, ams_hbase_site, ams_hbase_env, serviceMetricsDir,
+               operation_mode = 'embedded', services = None):
+    self.ams_hbase_site = ams_hbase_site
+    self.ams_hbase_env = ams_hbase_env
+    self.serviceMetricsDir = serviceMetricsDir
+    self.services = services
+    self.mode = operation_mode
+    # Add host metrics if not present as input
+    if self.services and 'HOST' not in self.services:
+      self.services.append('HOST')
+
+    # Initialize before use
+    self.initialize()
+
+  def initialize(self):
+    # calculate regions based on available memory
+    self.initialize_region_counts()
+    self.initialize_ordered_set_of_metrics()
+
+  def initialize_region_counts(self):
+    try:
+      xmx_master_bytes = format_Xmx_size_to_bytes(self.ams_hbase_env['hbase_master_heapsize'], 'm')
+      xmx_region_bytes = 0
+      if "hbase_regionserver_heapsize" in self.ams_hbase_env:
+        xmx_region_bytes = format_Xmx_size_to_bytes(self.ams_hbase_env['hbase_regionserver_heapsize'], 'm')
+      xmx_bytes = xmx_master_bytes + xmx_region_bytes
+      if self.mode == 'distributed':
+        xmx_bytes = xmx_region_bytes
+
+      memstore_max_mem = float(self.ams_hbase_site['hbase.regionserver.global.memstore.lowerLimit']) * xmx_bytes
+      memstore_flush_size = format_Xmx_size_to_bytes(self.ams_hbase_site['hbase.hregion.memstore.flush.size'])
+
+      max_inmemory_regions = (memstore_max_mem / memstore_flush_size) - other_region_static_count
+      print 'max_inmemory_regions: %s' % max_inmemory_regions
+
+      if max_inmemory_regions > 2:
+        # e.g. with max_inmemory_regions = 12, the precision (METRIC_RECORD) table
+        # gets floor(0.8 * 12) = 9 regions and the aggregate tables get the remaining 3
+        self.desired_precision_region_count = int(math.floor(0.8 * max_inmemory_regions))
+        self.desired_aggregate_region_count = int(max_inmemory_regions - self.desired_precision_region_count)
+      else:
+        self.desired_precision_region_count = 1
+        self.desired_aggregate_region_count = 1
+
+    except:
+      print('Bad config settings, could not calculate max regions available.')
+    pass
+
+  def initialize_ordered_set_of_metrics(self):
+    onlyServicefiles = [ f for f in os.listdir(self.serviceMetricsDir) if
+                  os.path.isfile(os.path.join(self.serviceMetricsDir, f)) ]
+
+    metrics = set()
+
+    for file in onlyServicefiles:
+      # Process for services selected at deploy time or all services if
+      # services arg is not passed
+      if self.services is None or file.rstrip(metric_filename_ext) in self.services:
+        print 'Processing file: %s' % os.path.join(self.serviceMetricsDir, file)
+        service_metrics = set()
+        with open(os.path.join(self.serviceMetricsDir, file), 'r') as f:
+          for metric in f:
+            service_metrics.add(metric.strip())
+          pass
+        pass
+        metrics.update(self.find_equidistant_metrics(list(sorted(service_metrics))))
+      pass
+    pass
+
+    self.metrics = sorted(metrics)
+    print 'metrics length: %s' % len(self.metrics)
+
+  # Pick up to 50 metric points for each service that are equidistant
+  # from each other
+  def find_equidistant_metrics(self, service_metrics):
+    equi_metrics = []
+    idx = len(service_metrics) / max_equidistant_points
+    if idx == 0:
+      return service_metrics
+    pass
+
+    index = idx
+    for i in range(0, max_equidistant_points - 1):
+      equi_metrics.append(service_metrics[index - 1])
+      index += idx
+    pass
+
+    return equi_metrics
+
+  def get_split_points(self):
+    split_points = collections.namedtuple('SplitPoints', [ 'precision', 'aggregate' ])
+    split_points.precision = []
+    split_points.aggregate = []
+
+    metric_list = list(self.metrics)
+    metrics_total = len(metric_list)
+
+    if self.desired_precision_region_count > 1:
+      idx = int(math.ceil(metrics_total / self.desired_precision_region_count))
+      index = idx
+      for i in range(0, self.desired_precision_region_count - 1):
+        if index < metrics_total - 1:
+          split_points.precision.append(metric_list[index])
+          index += idx
+
+    if self.desired_aggregate_region_count > 1:
+      idx = int(math.ceil(metrics_total / self.desired_aggregate_region_count))
+      index = idx
+      for i in range(0, self.desired_aggregate_region_count - 1):
+        if index < metrics_total - 1:
+          split_points.aggregate.append(metric_list[index])
+          index += idx
+
+    return split_points
+  pass
+
+def main(argv = None):
+  scriptDir = os.path.realpath(os.path.dirname(argv[0]))
+  serviceMetricsDir = os.path.join(scriptDir, os.pardir, 'files', 'service-metrics')
+
+  if os.path.exists(serviceMetricsDir):
+    onlyargs = argv[1:]
+    if len(onlyargs) < 3:
+      sys.stderr.write("Usage: dict(ams-hbase-site) dict(ams-hbase-env) list(services)\n")
+      sys.exit(2)
+    pass
+
+    ams_hbase_site = None
+    ams_hbase_env = None
+    services = None
+    try:
+      ams_hbase_site = ast.literal_eval(str(onlyargs[0]))
+      ams_hbase_env = ast.literal_eval(str(onlyargs[1]))
+      services = onlyargs[2]
+      if services:
+        services = str(services).split(',')
+      pass
+    except Exception, ex:
+      sys.stderr.write(str(ex))
+      sys.stderr.write("\nUsage: Expected items not found in input. Found "
+                      " ams-hbase-site => {0}, ams-hbase-env => {1},"
+                      " services => {2}".format(ams_hbase_site, ams_hbase_env, services))
+      sys.exit(2)
+
+    print '--------- AMS Regions Split point finder ---------'
+    print 'Services: %s' % services
+
+    mode = 'distributed' if 'hbase.rootdir' in ams_hbase_site and \
+                            'hdfs' in ams_hbase_site['hbase.rootdir'] else \
+                            'embedded'
+
+    split_point_finder = FindSplitPointsForAMSRegions(
+      ams_hbase_site, ams_hbase_env, serviceMetricsDir, mode, services)
+
+    result = split_point_finder.get_split_points()
+    print 'Split points for precision table : %s' % len(result.precision)
+    print 'precision: %s' % str(result.precision)
+    print 'Split points for aggregate table : %s' % len(result.aggregate)
+    print 'aggregate: %s' % str(result.aggregate)
+
+    return 0
+
+  else:
+    print 'Cannot find service metrics dir in %s' % scriptDir
+
+if __name__ == '__main__':
+  main(sys.argv)

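For what it's worth, the region math in initialize_region_counts() above is easiest to sanity check in isolation. A small sketch of the same arithmetic with made-up config values (the heap size, memstore lower limit and flush size below are illustrative only, not defaults shipped by this stack):

    import math

    # illustrative values only; the real ones come from ams-hbase-env / ams-hbase-site
    xmx_bytes = 4096 * (1 << 20)            # hbase_master_heapsize = 4096m, embedded mode
    memstore_lower_limit = 0.3              # hbase.regionserver.global.memstore.lowerLimit
    memstore_flush_size = 128 * (1 << 20)   # hbase.hregion.memstore.flush.size
    other_region_static_count = 6

    memstore_max_mem = memstore_lower_limit * xmx_bytes
    max_inmemory_regions = (memstore_max_mem / memstore_flush_size) - other_region_static_count

    if max_inmemory_regions > 2:
      precision_regions = int(math.floor(0.8 * max_inmemory_regions))
      aggregate_regions = int(max_inmemory_regions - precision_regions)
    else:
      precision_regions = aggregate_regions = 1

    print precision_regions, aggregate_regions   # prints "2 1" for the values above
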
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/status.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/status.py
new file mode 100755
index 0000000..59466ad
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/status.py
@@ -0,0 +1,46 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+import os
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def check_service_status(name):
+  if name=='collector':
+    pid_file = format("{ams_collector_pid_dir}/ambari-metrics-collector.pid")
+    check_process_status(pid_file)
+    pid_file = format("{hbase_pid_dir}/hbase-{hbase_user}-master.pid")
+    check_process_status(pid_file)
+    if os.path.exists(format("{hbase_pid_dir}/distributed_mode")):
+      pid_file = format("{hbase_pid_dir}/hbase-{hbase_user}-regionserver.pid")
+      check_process_status(pid_file)
+
+  elif name == 'monitor':
+    pid_file = format("{ams_monitor_pid_dir}/ambari-metrics-monitor.pid")
+    check_process_status(pid_file)
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def check_service_status(name):
+  import service_mapping
+  if name=='collector':
+    check_windows_service_status(service_mapping.ams_collector_win_service_name)
+  elif name == 'monitor':
+    check_windows_service_status(service_mapping.ams_monitor_win_service_name)

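The non-Windows branch above leans entirely on pid files plus check_process_status from resource_management. Conceptually that check reduces to reading the recorded pid and probing whether the process still exists; a rough standalone sketch of the idea (not the library implementation, and the pid file path is illustrative):

    import os

    def process_alive(pid_file):
      # sketch only: read the pid file and send signal 0 to test for the process
      if not os.path.isfile(pid_file):
        return False
      try:
        with open(pid_file) as f:
          pid = int(f.read().strip())
        os.kill(pid, 0)
        return True
      except (ValueError, OSError):
        return False

    print process_alive("/var/run/ambari-metrics-collector/ambari-metrics-collector.pid")
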

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
new file mode 100755
index 0000000..bacee9e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
@@ -0,0 +1,777 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  KEY `TBLS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS`
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31

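Side note on the dump above: the VERSION row inserted at the end is what the metastore's schema verification compares against, so reading it back is a quick way to confirm the schema actually loaded. A minimal sketch assuming the MySQL-python (MySQLdb) driver is available and using placeholder connection settings (host, user, password and database name are not taken from this stack):

    import MySQLdb  # assumption: MySQL-python driver is installed

    conn = MySQLdb.connect(host="localhost", user="hive", passwd="hive", db="hive")  # placeholders
    cur = conn.cursor()
    cur.execute("SELECT VER_ID, SCHEMA_VERSION, VERSION_COMMENT FROM VERSION")
    print cur.fetchone()   # expect (1L, '0.12.0', 'Hive release version 0.12.0')
    conn.close()
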
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
new file mode 100755
index 0000000..a2cbfa2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
@@ -0,0 +1,717 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
+    TYPE_NAME VARCHAR2(4000) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(128) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');


[13/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager.py
new file mode 100755
index 0000000..d185b7e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager.py
@@ -0,0 +1,144 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import nodemanager_upgrade
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+
+from yarn import yarn
+from service import service
+
+class Nodemanager(Script):
+
+  def get_component_name(self):
+    return "hadoop-yarn-nodemanager"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name="nodemanager")
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing NodeManager Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-yarn-nodemanager", params.version)
+      #Execute(format("stack-select set hadoop-yarn-nodemanager {version}"))
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('nodemanager',action='start')
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing NodeManager Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+
+    nodemanager_upgrade.post_upgrade_check()
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    service('nodemanager',action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.nodemanager_pid_file)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
+                           "yarn.acl.enable": "true"}
+      props_empty_check = ["yarn.nodemanager.principal",
+                           "yarn.nodemanager.keytab",
+                           "yarn.nodemanager.webapp.spnego-principal",
+                           "yarn.nodemanager.webapp.spnego-keytab-file"]
+
+      props_read_check = ["yarn.nodemanager.keytab",
+                          "yarn.nodemanager.webapp.spnego-keytab-file"]
+      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
+                                           props_read_check)
+
+      yarn_expectations ={}
+      yarn_expectations.update(yarn_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'yarn-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, yarn_site_props)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'yarn-site' not in security_params
+               or 'yarn.nodemanager.keytab' not in security_params['yarn-site']
+               or 'yarn.nodemanager.principal' not in security_params['yarn-site']) \
+            or 'yarn.nodemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
+            or 'yarn.nodemanager.webapp.spnego-principal' not in security_params['yarn-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.nodemanager.keytab'],
+                                security_params['yarn-site']['yarn.nodemanager.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-keytab-file'],
+                                security_params['yarn-site']['yarn.nodemanager.webapp.spnego-principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+if __name__ == "__main__":
+  Nodemanager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager_upgrade.py
new file mode 100755
index 0000000..54e0fae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/nodemanager_upgrade.py
@@ -0,0 +1,74 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import subprocess
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core import shell
+from resource_management.libraries.functions.decorator import retry
+
+
+def post_upgrade_check():
+  '''
+  Checks that the NodeManager has rejoined the cluster.
+  This function will obtain the Kerberos ticket if security is enabled.
+  :return:
+  '''
+  import params
+
+  Logger.info('NodeManager executing "yarn node -list -states=RUNNING" to verify the node has rejoined the cluster...')
+  if params.security_enabled and params.nodemanager_kinit_cmd:
+    Execute(params.nodemanager_kinit_cmd, user = params.yarn_user)
+
+  _check_nodemanager_startup()
+
+
+@retry(times=12, sleep_time=10, err_class=Fail)
+def _check_nodemanager_startup():
+  '''
+  Checks that a NodeManager is in a RUNNING state in the cluster via
+  "yarn node -list -states=RUNNING" command. Once the NodeManager is found to be
+  alive this method will return, otherwise it will raise a Fail(...) and retry
+  automatically.
+  :return:
+  '''
+  import params
+
+  command = 'yarn node -list -states=RUNNING'
+
+  try:
+    # 'su - yarn -c "yarn node -status c6401.ambari.apache.org:45454"'
+    return_code, yarn_output = shell.call(command, user=params.hdfs_user)
+  except:
+    raise Fail('Unable to determine if the NodeManager has started after upgrade.')
+
+  if return_code == 0:
+    hostname = params.hostname.lower()
+    nodemanager_address = params.nm_address.lower()
+    yarn_output = yarn_output.lower()
+
+    if hostname in yarn_output or nodemanager_address in yarn_output:
+      Logger.info('NodeManager with ID {0} has rejoined the cluster.'.format(nodemanager_address))
+      return
+    else:
+      raise Fail('NodeManager with ID {0} was not found in the list of running NodeManagers'.format(nodemanager_address))
+
+  raise Fail('Unable to determine if the NodeManager has started after upgrade (result code {0})'.format(str(return_code)))
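
For readers unfamiliar with the @retry decorator used above: with times=12 and sleep_time=10 the RUNNING-state check is re-attempted for roughly two minutes before the surrounding upgrade step fails. A rough, self-contained sketch of the equivalent loop, for illustration only (the real decorator lives in resource_management.libraries.functions.decorator):

  import time

  def retry_like(fn, times=12, sleep_time=10, err_class=Exception):
      """Call fn() until it stops raising err_class, or give up after 'times' attempts."""
      for attempt in range(times):
          try:
              return fn()
          except err_class:
              if attempt == times - 1:
                  raise
              time.sleep(sleep_time)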

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/params.py
new file mode 100755
index 0000000..957e2dd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/params.py
@@ -0,0 +1,224 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.libraries.functions.default import default
+from resource_management import *
+from resource_management.libraries.functions import conf_select, stack_select
+import status_params
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+# This is expected to be of the form #.#.#.#
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+hostname = config['hostname']
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+#hadoop params
+yarn_role_root = "hadoop-yarn-client"
+mapred_role_root = "hadoop-mapreduce-client"
+
+command_role = default("/role", "")
+if command_role == "APP_TIMELINE_SERVER":
+  yarn_role_root = "hadoop-yarn-timelineserver"
+elif command_role == "HISTORYSERVER":
+  mapred_role_root = "hadoop-mapreduce-historyserver"
+elif command_role == "MAPREDUCE2_CLIENT":
+  mapred_role_root = "hadoop-mapreduce-client"
+elif command_role == "NODEMANAGER":
+  yarn_role_root = "hadoop-yarn-nodemanager"
+elif command_role == "RESOURCEMANAGER":
+  yarn_role_root = "hadoop-yarn-resourcemanager"
+elif command_role == "YARN_CLIENT":
+  yarn_role_root = "hadoop-yarn-client"
+
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+number_of_nm = len(nm_hosts)
+
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_mapred2_jar_location = format("/usr/iop/current/{mapred_role_root}")
+mapred_bin = format("/usr/iop/current/{mapred_role_root}/sbin")
+hadoop_yarn_home = format("/usr/iop/current/{yarn_role_root}")
+yarn_bin = format("/usr/iop/current/{yarn_role_root}/sbin")
+yarn_container_bin = format("/usr/iop/current/{yarn_role_root}/bin")
+
+limits_conf_dir = "/etc/security/limits.d"
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir + os.pathsep + yarn_container_bin
+
+ulimit_cmd = "ulimit -c unlimited;"
+
+mapred_user = status_params.mapred_user
+yarn_user = status_params.yarn_user
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
+kinit_path_local = functions.get_kinit_path()
+rm_hosts = config['clusterHostInfo']['rm_host']
+rm_host = rm_hosts[0]
+rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
+rm_https_port = "8090"
+
+default_exclude_path=os.path.join(hadoop_conf_dir, 'yarn.exclude')
+# TODO UPGRADE default, update site during upgrade
+rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path",default_exclude_path)
+
+java64_home = config['hostLevelParams']['java_home']
+hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
+
+yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
+resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
+nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
+apptimelineserver_heapsize = default("/configurations/yarn-env/apptimelineserver_heapsize", 1024)
+ats_leveldb_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-timeline-store.path']
+yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
+yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
+mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
+mapred_log_dir_prefix = config['configurations']['mapred-env']['mapred_log_dir_prefix']
+mapred_env_sh_template = config['configurations']['mapred-env']['content']
+yarn_env_sh_template = config['configurations']['yarn-env']['content']
+
+if len(rm_hosts) > 1:
+  additional_rm_host = rm_hosts[1]
+  rm_webui_address = format("{rm_host}:{rm_port},{additional_rm_host}:{rm_port}")
+  rm_webui_https_address = format("{rm_host}:{rm_https_port},{additional_rm_host}:{rm_https_port}")
+else:
+  rm_webui_address = format("{rm_host}:{rm_port}")
+  rm_webui_https_address = format("{rm_host}:{rm_https_port}")
+
+nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
+hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
+nm_address = config['configurations']['yarn-site']['yarn.nodemanager.address']  # still contains 0.0.0.0
+if hostname and nm_address and nm_address.startswith("0.0.0.0:"):
+  nm_address = nm_address.replace("0.0.0.0", hostname)
+
+nm_local_dirs = config['configurations']['yarn-site']['yarn.nodemanager.local-dirs']
+nm_log_dirs = config['configurations']['yarn-site']['yarn.nodemanager.log-dirs']
+
+distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
+hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
+
+yarn_pid_dir = status_params.yarn_pid_dir
+mapred_pid_dir = status_params.mapred_pid_dir
+
+mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
+yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
+mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
+yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#exclude file
+default_exclude_path=os.path.join(hadoop_conf_dir, 'yarn.exclude')
+exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
+exclude_file_path = default("/configurations/yarn-site/yarn.resourcemanager.nodes.exclude-path",default_exclude_path)
+
+ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
+has_ats = not len(ats_host) == 0
+
+# default kinit commands
+rm_kinit_cmd = ""
+yarn_timelineservice_kinit_cmd = ""
+nodemanager_kinit_cmd = ""
+
+if security_enabled:
+  _rm_principal_name = config['configurations']['yarn-site']['yarn.resourcemanager.principal']
+  _rm_principal_name = _rm_principal_name.replace('_HOST',hostname.lower())
+  _rm_keytab = config['configurations']['yarn-site']['yarn.resourcemanager.keytab']
+  rm_kinit_cmd = format("{kinit_path_local} -kt {_rm_keytab} {_rm_principal_name};")
+
+  if has_ats:
+    _yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal']
+    _yarn_timelineservice_principal_name = _yarn_timelineservice_principal_name.replace('_HOST', hostname.lower())
+    _yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab']
+    yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {_yarn_timelineservice_keytab} {_yarn_timelineservice_principal_name};")
+
+  if 'yarn.nodemanager.principal' in config['configurations']['yarn-site']:
+    _nodemanager_principal_name = default('/configurations/yarn-site/yarn.nodemanager.principal', None)
+    if _nodemanager_principal_name:
+      _nodemanager_principal_name = _nodemanager_principal_name.replace('_HOST', hostname.lower())
+
+    _nodemanager_keytab = config['configurations']['yarn-site']['yarn.nodemanager.keytab']
+    nodemanager_kinit_cmd = format("{kinit_path_local} -kt {_nodemanager_keytab} {_nodemanager_principal_name};")
+
+else:
+  rm_kinit_cmd = ""
+  yarn_timelineservice_kinit_cmd = ""
+
+yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']
+yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']
+mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']
+mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']
+jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize", "900")
+jhs_leveldb_state_store_dir = default('/configurations/mapred-site/mapreduce.jobhistory.recovery.store.leveldb.path', "/hadoop/mapreduce/jhs")
+
+#for create_hdfs_directory
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+ )
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#taskcontroller.cfg
+
+mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+min_user_id = config['configurations']['yarn-env']['min_user_id']
+
+# Node labels
+node_labels_dir = default("/configurations/yarn-site/yarn.node-labels.fs-store.root-dir", None)
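
The functools.partial call above pre-binds the cluster-wide keyword arguments (user, keytab, hadoop_conf_dir, and so on) so that individual service scripts only supply the call-specific ones. A generic illustration of the same pattern with a hypothetical function and illustrative values, not the real HdfsResource API:

  import functools

  def create_hdfs_path(path, action="create", user=None, keytab=None, conf_dir=None):
      # Stand-in for a resource that needs the same cluster-wide settings on every call.
      print("%s %s as user=%s (conf=%s)" % (action, path, user, conf_dir))

  CreateHdfsPath = functools.partial(create_hdfs_path,
                                     user="hdfs",
                                     keytab="/etc/security/keytabs/hdfs.headless.keytab",
                                     conf_dir="/etc/hadoop/conf")

  CreateHdfsPath("/app-logs")                          # only per-call arguments remain
  CreateHdfsPath("/mr-history/done", action="delete")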

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/resourcemanager.py
new file mode 100755
index 0000000..8e350b9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/resourcemanager.py
@@ -0,0 +1,179 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+
+from yarn import yarn
+from service import service
+
+
+class Resourcemanager(Script):
+
+  def get_component_name(self):
+    return "hadoop-yarn-resourcemanager"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    yarn(name='resourcemanager')
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing ResourceManager Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-yarn-resourcemanager", params.version)
+      #Execute(format("stack-select set hadoop-yarn-resourcemanager {version}"))
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('resourcemanager',
+            action='start'
+    )
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+
+    service('resourcemanager',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.resourcemanager_pid_file)
+    pass
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"yarn.timeline-service.http-authentication.type": "kerberos",
+                           "yarn.acl.enable": "true"}
+      props_empty_check = ["yarn.resourcemanager.principal",
+                           "yarn.resourcemanager.keytab",
+                           "yarn.resourcemanager.webapp.spnego-principal",
+                           "yarn.resourcemanager.webapp.spnego-keytab-file"]
+
+      props_read_check = ["yarn.resourcemanager.keytab",
+                          "yarn.resourcemanager.webapp.spnego-keytab-file"]
+      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
+                                           props_read_check)
+
+      yarn_expectations ={}
+      yarn_expectations.update(yarn_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'yarn-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, yarn_site_props)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'yarn-site' not in security_params
+               or 'yarn.resourcemanager.keytab' not in security_params['yarn-site']
+               or 'yarn.resourcemanager.principal' not in security_params['yarn-site']) \
+            or 'yarn.resourcemanager.webapp.spnego-keytab-file' not in security_params['yarn-site'] \
+            or 'yarn.resourcemanager.webapp.spnego-principal' not in security_params['yarn-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.resourcemanager.keytab'],
+                                security_params['yarn-site']['yarn.resourcemanager.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-keytab-file'],
+                                security_params['yarn-site']['yarn.resourcemanager.webapp.spnego-principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def refreshqueues(self, env):
+    import params
+
+    self.configure(env)
+    env.set_params(params)
+
+    service('resourcemanager',
+            action='refreshQueues'
+    )
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    rm_kinit_cmd = params.rm_kinit_cmd
+    yarn_user = params.yarn_user
+    conf_dir = params.hadoop_conf_dir
+    user_group = params.user_group
+
+    yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=yarn_user,
+         group=user_group
+    )
+
+    if params.update_exclude_file_only == False:
+      Execute(yarn_refresh_cmd,
+            environment= {'PATH' : params.execute_path },
+            user=yarn_user)
+      pass
+    pass
+
+
+if __name__ == "__main__":
+  Resourcemanager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service.py
new file mode 100755
index 0000000..1002094
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service.py
@@ -0,0 +1,76 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+
+def service(componentName, action='start', serviceName='yarn'):
+
+  import params
+
+  if serviceName == 'mapreduce' and componentName == 'historyserver':
+    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
+    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{componentName}.pid")
+    usr = params.mapred_user
+  else:
+    daemon = format("{yarn_bin}/yarn-daemon.sh")
+    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
+    usr = params.yarn_user
+
+  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")
+    check_process = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
+
+    # Remove the pid file if its corresponding process is not running.
+    File(pid_file,
+         action="delete",
+         not_if=check_process)
+
+    # Attempt to start the process. Internally, this is skipped if the process is already running.
+    Execute(daemon_cmd,
+            user=usr,
+            not_if=check_process
+    )
+
+    # Ensure that the process with the expected PID exists.
+    Execute(check_process,
+            user=usr,
+            not_if=check_process,
+            initial_wait=5
+    )
+
+  elif action == 'stop':
+    daemon_cmd = format("{cmd} stop {componentName}")
+    Execute(daemon_cmd,
+            user=usr)
+
+    File(pid_file,
+         action="delete")
+
+  elif action == 'refreshQueues':
+    rm_kinit_cmd = params.rm_kinit_cmd
+    refresh_cmd = format("{rm_kinit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {yarn_container_bin}/yarn rmadmin -refreshQueues")
+
+    Execute(refresh_cmd,
+            user=usr,
+    )
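
For reference, the component scripts above drive this helper with calls such as the following (the first three correspond to calls in nodemanager.py and resourcemanager.py in this same commit; the history-server variant is assumed, since that script is not part of this hunk):

  service('nodemanager', action='start')
  service('nodemanager', action='stop')
  service('resourcemanager', action='refreshQueues')
  service('historyserver', action='start', serviceName='mapreduce')  # assumed call shape

With the defaults from params.py substituted, the composed NodeManager start command reduces to roughly "ulimit -c unlimited; export HADOOP_LIBEXEC_DIR=... && <yarn_bin>/yarn-daemon.sh --config <hadoop_conf_dir> start nodemanager", executed as the yarn user and guarded by the pid-file check.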

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service_check.py
new file mode 100755
index 0000000..7f42f9e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/service_check.py
@@ -0,0 +1,89 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+import re
+from resource_management.libraries.functions.version import compare_versions
+from resource_management import *
+import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+
+CURL_CONNECTION_TIMEOUT = '5'
+
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    if params.stack_version != "" and compare_versions(params.stack_version, '4.0') >= 0:
+      path_to_distributed_shell_jar = "/usr/iop/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar"
+    else:
+      path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
+
+    yarn_distrubuted_shell_check_cmd = format("yarn org.apache.hadoop.yarn.applications.distributedshell.Client "
+                                              "-shell_command ls -num_containers {number_of_nm} -jar {path_to_distributed_shell_jar}")
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+      smoke_cmd = format("{kinit_cmd} {yarn_distrubuted_shell_check_cmd}")
+    else:
+      smoke_cmd = yarn_distrubuted_shell_check_cmd
+
+    return_code, out = shell.checked_call(smoke_cmd,
+                                          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                          user=params.smokeuser,
+                                          )
+
+    m = re.search("appTrackingUrl=(.*),\s", out)
+    if m is None:
+      raise Exception("Could not find appTrackingUrl in the distributed shell client output")
+    app_url = m.group(1)
+
+    splitted_app_url = str(app_url).split('/')
+
+    for item in splitted_app_url:
+      if "application" in item:
+        application_name = item
+
+    json_response_received = False
+    for rm_host in params.rm_hosts:
+      info_app_url = "http://" + rm_host + ":" + params.rm_port + "/ws/v1/cluster/apps/" + application_name
+
+      get_app_info_cmd = "curl --negotiate -u : -sL --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url
+
+      return_code, stdout = shell.checked_call(get_app_info_cmd,
+                                            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                                            user=params.smokeuser,
+                                            )
+
+      try:
+        json_response = json.loads(stdout)
+      except ValueError:
+        # This ResourceManager did not return parsable JSON (e.g. it is unreachable or standby); try the next host.
+        continue
+
+      json_response_received = True
+
+      if json_response['app']['state'] != "FINISHED" or json_response['app']['finalStatus'] != "SUCCEEDED":
+        raise Exception("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")
+
+    if not json_response_received:
+      raise Exception("Could not get json response from YARN API")
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
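
The REST check above only accepts an application that reports state FINISHED and finalStatus SUCCEEDED. A minimal sketch of that parsing step, using a made-up response body (the real one comes from the ResourceManager endpoint /ws/v1/cluster/apps/<application_id>):

  import json  # the script uses ambari_simplejson; the standard library behaves the same here

  # Illustrative response body with a made-up application id.
  stdout = '{"app": {"id": "application_1498600000000_0001", "state": "FINISHED", "finalStatus": "SUCCEEDED"}}'

  json_response = json.loads(stdout)
  app = json_response['app']
  if app['state'] != "FINISHED" or app['finalStatus'] != "SUCCEEDED":
    raise Exception("Application " + app['id'] + " state/status is not valid. Should be FINISHED/SUCCEEDED.")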

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/status_params.py
new file mode 100755
index 0000000..3b7f5ba
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/status_params.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix']
+mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix']
+yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
+mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
+
+resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
+nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
+yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/yarn-{yarn_user}-historyserver.pid")
+yarn_historyserver_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-timelineserver.pid")  # *-historyserver.pid is deprecated
+mapred_historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
+
+# Security related/required params
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hostname = config['hostname']
+kinit_path_local = functions.get_kinit_path()
+security_enabled = config['configurations']['cluster-env']['security_enabled']
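
The format() calls above expand configuration-driven templates into concrete PID-file paths. For example, assuming the common defaults of /var/run/hadoop-yarn for yarn_pid_dir_prefix and "yarn" for yarn_user (neither is fixed by this file), the ResourceManager PID file resolves as follows:

  yarn_pid_dir_prefix = "/var/run/hadoop-yarn"  # assumed default, comes from yarn-env in practice
  yarn_user = "yarn"                            # assumed default
  yarn_pid_dir = "{0}/{1}".format(yarn_pid_dir_prefix, yarn_user)
  resourcemanager_pid_file = "{0}/yarn-{1}-resourcemanager.pid".format(yarn_pid_dir, yarn_user)
  # -> /var/run/hadoop-yarn/yarn/yarn-yarn-resourcemanager.pid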

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn.py
new file mode 100755
index 0000000..1264b87
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn.py
@@ -0,0 +1,277 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+import os
+
+
+def yarn(name=None):
+  import params
+
+  if name in ["nodemanager","historyserver"]:
+    if params.yarn_log_aggregation_enabled:
+      params.HdfsResource(params.yarn_nm_app_log_dir,
+                           action="create_on_execute",
+                           type="directory",
+                           owner=params.yarn_user,
+                           group=params.user_group,
+                           mode=0777,
+                           recursive_chmod=True
+      )
+    params.HdfsResource("/mapred",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.mapred_user
+    )
+    params.HdfsResource("/mapred/system",
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hdfs_user
+    )
+    params.HdfsResource(params.mapreduce_jobhistory_intermediate_done_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.mapred_user,
+                         group=params.user_group,
+                         mode=0777
+    )
+
+    params.HdfsResource(params.mapreduce_jobhistory_done_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.mapred_user,
+                         group=params.user_group,
+                         mode=0777
+    )
+    params.HdfsResource(None, action="execute")
+    Directory(params.jhs_leveldb_state_store_dir,
+              owner=params.mapred_user,
+              group=params.user_group,
+              create_parents=True,
+              cd_access="a",
+              )
+
+  if name == "nodemanager":
+    Directory(params.nm_local_dirs.split(',') + params.nm_log_dirs.split(','),
+              owner=params.yarn_user,
+              group=params.user_group,
+              create_parents=True,
+              cd_access="a",
+              ignore_failures=True,
+              mode=0775
+              )
+
+    Execute(('chown', '-R', params.yarn_user, params.nm_local_dirs),
+            only_if=format("test -d {nm_local_dirs}"),
+            sudo=True)
+
+
+    if params.security_enabled:
+      smokeuser_directories = [os.path.join(nm_local_dir, 'usercache', params.smokeuser)
+                               for nm_local_dir in params.nm_local_dirs.split(',')]
+      for directory in smokeuser_directories:
+        Execute(('chown', '-R', params.smokeuser, directory),
+                only_if=format("test -d {directory}"),
+                sudo=True,
+        )
+  Directory([params.yarn_pid_dir_prefix, params.yarn_pid_dir, params.yarn_log_dir],
+            owner=params.yarn_user,
+            group=params.user_group,
+            create_parents=True,
+            cd_access = 'a',
+  )
+
+  Directory([params.mapred_pid_dir_prefix, params.mapred_pid_dir, params.mapred_log_dir_prefix, params.mapred_log_dir],
+            owner=params.mapred_user,
+            group=params.user_group,
+            create_parents=True,
+            cd_access = 'a',
+  )
+  Directory([params.yarn_log_dir_prefix],
+            owner=params.yarn_user,
+            create_parents=True,
+            ignore_failures=True,
+            cd_access = 'a',
+  )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("yarn-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['yarn-site'],
+            configuration_attributes=params.config['configuration_attributes']['yarn-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['capacity-scheduler'],
+            configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  if name == 'resourcemanager':
+    File(params.yarn_job_summary_log,
+       owner=params.yarn_user,
+       group=params.user_group
+    )
+    if params.node_labels_dir:
+      params.HdfsResource(params.node_labels_dir,
+                           type="directory",
+                           action="create_on_execute",
+                           owner=params.yarn_user,
+                           group=params.user_group,
+                           mode=0700
+      )
+      params.HdfsResource(None, action="execute")
+  elif name == 'apptimelineserver':
+    Directory(params.ats_leveldb_dir,
+       owner=params.yarn_user,
+       group=params.user_group,
+       create_parents=True,
+       cd_access="a",
+    )
+
+  File(params.rm_nodes_exclude_path,
+       owner=params.yarn_user,
+       group=params.user_group
+  )
+
+  File(format("{limits_conf_dir}/yarn.conf"),
+       mode=0644,
+       content=Template('yarn.conf.j2')
+  )
+
+  File(format("{limits_conf_dir}/mapreduce.conf"),
+       mode=0644,
+       content=Template('mapreduce.conf.j2')
+  )
+
+  File(format("{hadoop_conf_dir}/yarn-env.sh"),
+       owner=params.yarn_user,
+       group=params.user_group,
+       mode=0755,
+       content=InlineTemplate(params.yarn_env_sh_template)
+  )
+
+  container_executor = format("{yarn_container_bin}/container-executor")
+  File(container_executor,
+       group=params.yarn_executor_container_group,
+       mode=06050
+  )
+
+  File(format("{hadoop_conf_dir}/container-executor.cfg"),
+       group=params.user_group,
+       mode=0644,
+       content=Template('container-executor.cfg.j2')
+  )
+
+
+  if params.security_enabled:
+    tc_mode = 0644
+    tc_owner = "root"
+  else:
+    tc_mode = None
+    tc_owner = params.hdfs_user
+
+  File(format("{hadoop_conf_dir}/mapred-env.sh"),
+       owner=tc_owner,
+       content=InlineTemplate(params.mapred_env_sh_template)
+  )
+
+  if params.security_enabled:
+    File(os.path.join(params.hadoop_bin, "task-controller"),
+         owner="root",
+         group=params.mapred_tt_group,
+         mode=06050
+    )
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner = tc_owner,
+         mode = tc_mode,
+         group = params.mapred_tt_group,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  else:
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner=tc_owner,
+         content=Template("taskcontroller.cfg.j2")
+    )
+
+  if "mapred-site" in params.config['configurations']:
+    XmlConfig("mapred-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['mapred-site'],
+              configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+
+  if "capacity-scheduler" in params.config['configurations']:
+    XmlConfig("capacity-scheduler.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['capacity-scheduler'],
+              configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')):
+    File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if os.path.exists(
+    os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if os.path.exists(
+    os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
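
The HdfsResource calls in this script only queue work: each call with action="create_on_execute" records a directory to create, and nothing touches HDFS until the closing HdfsResource(None, action="execute") call flushes the whole batch. A simplified, self-contained analogue of that control flow (illustration only, not the real API):

  class QueuedHdfsResource(object):
    pending = []

    def __init__(self, path, action, **attrs):
      if action == "create_on_execute":
        # Only remembered here; the real resource also defers its HDFS work until execute.
        QueuedHdfsResource.pending.append((path, attrs))
      elif action == "execute":
        for queued_path, queued_attrs in QueuedHdfsResource.pending:
          print("creating %s with %s" % (queued_path, queued_attrs))
        QueuedHdfsResource.pending = []

  QueuedHdfsResource("/mapred", action="create_on_execute", owner="mapred")
  QueuedHdfsResource("/mapred/system", action="create_on_execute", owner="hdfs")
  QueuedHdfsResource(None, action="execute")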

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn_client.py
new file mode 100755
index 0000000..0af5ccb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/yarn_client.py
@@ -0,0 +1,56 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+
+from yarn import yarn
+
+class YarnClient(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+      #Execute(format("stack-select set hadoop-client {version}"))
+
+if __name__ == "__main__":
+  YarnClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/container-executor.cfg.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/container-executor.cfg.j2
new file mode 100755
index 0000000..9f38f183
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/container-executor.cfg.j2
@@ -0,0 +1,40 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+yarn.nodemanager.local-dirs={{nm_local_dirs}}
+yarn.nodemanager.log-dirs={{nm_log_dirs}}
+yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
+banned.users=hdfs,yarn,mapred,bin
+min.user.id=200
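
Ambari renders this template through its Template resource (see yarn.py above), but the same result can be reproduced with plain Jinja2; the directory and group values below are illustrative assumptions:

  from jinja2 import Template

  template_src = (
      "yarn.nodemanager.local-dirs={{nm_local_dirs}}\n"
      "yarn.nodemanager.log-dirs={{nm_log_dirs}}\n"
      "yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}\n"
      "banned.users=hdfs,yarn,mapred,bin\n"
      "min.user.id=200\n")

  print(Template(template_src).render(
      nm_local_dirs="/hadoop/yarn/local",          # assumed example value
      nm_log_dirs="/hadoop/yarn/log",              # assumed example value
      yarn_executor_container_group="hadoop"))     # assumed example value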

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/exclude_hosts_list.j2
new file mode 100755
index 0000000..c7ce416
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/mapreduce.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/mapreduce.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/mapreduce.conf.j2
new file mode 100755
index 0000000..b996645
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/mapreduce.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{mapred_user}}   - nofile 32768
+{{mapred_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/taskcontroller.cfg.j2
new file mode 100755
index 0000000..3d5f4f2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/taskcontroller.cfg.j2
@@ -0,0 +1,38 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir={{mapred_local_dir}}
+mapreduce.tasktracker.group={{mapred_tt_group}}
+hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/yarn.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/yarn.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/yarn.conf.j2
new file mode 100755
index 0000000..3bd7a45
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/templates/yarn.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{yarn_user}}   - nofile 32768
+{{yarn_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/alerts.json
new file mode 100755
index 0000000..027d7fa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/alerts.json
@@ -0,0 +1,58 @@
+{
+  "ZOOKEEPER": {
+    "service": [
+      {
+        "name": "zookeeper_server_process_percent",
+        "label": "Percent ZooKeeper Servers Available",
+        "description": "This alert is triggered if the number of down ZooKeeper servers in the cluster is greater than the configured critical threshold. It aggregates the results of ZooKeeper process checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "zookeeper_server_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.35
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.70
+            }
+          }
+        }
+      }
+    ],
+    "ZOOKEEPER_SERVER": [
+      {
+        "name": "zookeeper_server_process",
+        "label": "ZooKeeper Server Process",
+        "description": "This host-level alert is triggered if the ZooKeeper server process cannot be determined to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{zoo.cfg/clientPort}}",
+          "default_port": 2181,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
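
In the AGGREGATE source above, the warning and critical values are fractions of ZooKeeper servers whose process check failed. A small sketch of how such thresholds classify a result (the helper below is illustrative, not Ambari's implementation):

  def classify(affected, total, warning=0.35, critical=0.70):
    # Fraction of ZooKeeper servers whose process check failed.
    ratio = float(affected) / total
    if ratio >= critical:
      return "CRITICAL"
    if ratio >= warning:
      return "WARNING"
    return "OK"

  print(classify(1, 3))  # ~0.33 -> OK
  print(classify(2, 3))  # ~0.67 -> WARNING
  print(classify(3, 3))  #  1.00 -> CRITICAL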

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zoo.cfg.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zoo.cfg.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zoo.cfg.xml
new file mode 100755
index 0000000..275cc6a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zoo.cfg.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>tickTime</name>
+    <display-name>Length of single Tick</display-name>
+    <value>2000</value>
+    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+      <unit>ms</unit>
+    </value-attributes>
+  </property>
+  <property>
+    <name>initLimit</name>
+    <display-name>Ticks to allow for sync at Init</display-name>
+    <value>10</value>
+    <description>Ticks to allow for sync at Init.</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>syncLimit</name>
+    <display-name>Ticks to allow for sync at Runtime</display-name>
+    <value>5</value>
+    <description>Ticks to allow for sync at Runtime.</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>clientPort</name>
+    <display-name>Port for running ZK Server</display-name>
+    <value>2181</value>
+    <description>Port for running ZK Server.</description>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>dataDir</name>
+    <display-name>ZooKeeper directory</display-name>
+    <value>/hadoop/zookeeper</value>
+    <description>Data directory for ZooKeeper.</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>autopurge.snapRetainCount</name>
+    <value>30</value>
+    <description>ZooKeeper purge feature retains the autopurge.snapRetainCount most recent snapshots and the corresponding transaction logs in the dataDir and dataLogDir respectively and deletes the rest. </description>
+  </property>
+  <property>
+    <name>autopurge.purgeInterval</name>
+    <value>24</value>
+    <description>The time interval in hours after which the purge task is triggered. Set to a positive integer (1 and above) to enable auto purging.</description>
+  </property>
+</configuration>
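
Each property above maps one-to-one onto an entry in the zoo.cfg that Ambari generates (per-host server.N lines are typically added separately by the ZooKeeper scripts). A sketch of the rendered entries using the default values from this file:

  zoo_cfg_defaults = {
      "tickTime": 2000,
      "initLimit": 10,
      "syncLimit": 5,
      "clientPort": 2181,
      "dataDir": "/hadoop/zookeeper",
      "autopurge.snapRetainCount": 30,
      "autopurge.purgeInterval": 24,
  }

  print("\n".join("%s=%s" % (key, value) for key, value in sorted(zoo_cfg_defaults.items())))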

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
new file mode 100755
index 0000000..b57992e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-env.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>zk_user</name>
+    <value>zookeeper</value>
+    <property-type>USER</property-type>
+    <description>ZooKeeper User.</description>
+  </property>
+  <property>
+    <name>zk_log_dir</name>
+    <display-name>ZooKeeper Log Dir</display-name>
+    <value>/var/log/zookeeper</value>
+    <description>ZooKeeper Log Dir</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>zk_pid_dir</name>
+    <display-name>ZooKeeper PID Dir</display-name>
+    <value>/var/run/zookeeper</value>
+    <description>ZooKeeper Pid Dir</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+
+  <!-- zookeeper-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the Jinja template for the zookeeper-env.sh file</description>
+    <value>
+export JAVA_HOME={{java64_home}}
+export ZOOKEEPER_HOME={{zk_home}}
+export ZOO_LOG_DIR={{zk_log_dir}}
+export ZOOPIDFILE={{zk_pid_file}}
+export SERVER_JVMFLAGS={{zk_server_heapsize}}
+export JAVA=$JAVA_HOME/bin/java
+export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
+
+{% if security_enabled %}
+export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
+export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
+{% endif %}
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
new file mode 100755
index 0000000..6fcf5bc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+#
+# ZooKeeper Logging Configuration
+#
+
+# DEFAULT: console appender only
+log4j.rootLogger=INFO, CONSOLE
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=INFO
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=DEBUG
+log4j.appender.ROLLINGFILE.File=zookeeper.log
+
+# Max log file size of 10MB
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+# uncomment the next line to limit number of backup files
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=zookeeper_trace.log
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/kerberos.json
new file mode 100755
index 0000000..0a64ea5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/kerberos.json
@@ -0,0 +1,39 @@
+{
+  "services": [
+    {
+      "name": "ZOOKEEPER",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "ZOOKEEPER_SERVER",
+          "identities": [
+            {
+              "name": "zookeeper_zk",
+              "principal": {
+                "value": "zookeeper/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "zookeeper-env/zookeeper_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/zk.service.keytab",
+                "owner": {
+                  "name": "${zookeeper-env/zk_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "zookeeper-env/zookeeper_keytab_path"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/metainfo.xml
new file mode 100755
index 0000000..2bd2532
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,91 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <displayName>ZooKeeper</displayName>
+      <comment>Centralized service which provides highly reliable distributed coordination</comment>
+      <version>3.4.6</version>
+      <components>
+
+        <component>
+          <name>ZOOKEEPER_SERVER</name>
+          <displayName>ZooKeeper Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/zookeeper_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>ZOOKEEPER_CLIENT</name>
+          <displayName>ZooKeeper Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/zookeeper_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>zookeeper-env.sh</fileName>
+              <dictionaryName>zookeeper-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>zookeeper-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>zookeeper-log4j</config-type>
+        <config-type>zookeeper-env</config-type>
+        <config-type>zoo.cfg</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterChange>true</restartRequiredAfterChange>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkEnv.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkEnv.sh
new file mode 100755
index 0000000..fa1b832
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkEnv.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script should be sourced into other zookeeper
+# scripts to setup the env variables
+
+# We use ZOOCFGDIR if defined,
+# otherwise we use /etc/zookeeper
+# or the conf directory that is
+# a sibling of this script's directory
+if [ "x$ZOOCFGDIR" = "x" ]
+then
+    if [ -d "/etc/zookeeper" ]
+    then
+        ZOOCFGDIR="/etc/zookeeper"
+    else
+        ZOOCFGDIR="$ZOOBINDIR/../conf"
+    fi
+fi
+
+if [ "x$ZOOCFG" = "x" ]
+then
+    ZOOCFG="zoo.cfg"
+fi
+
+ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
+
+if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
+then
+    . "$ZOOCFGDIR/zookeeper-env.sh"
+fi
+
+if [ "x${ZOO_LOG_DIR}" = "x" ]
+then
+    ZOO_LOG_DIR="."
+fi
+
+if [ "x${ZOO_LOG4J_PROP}" = "x" ]
+then
+    ZOO_LOG4J_PROP="INFO,CONSOLE"
+fi
+
+#add the zoocfg dir to classpath
+CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
+
+for i in "$ZOOBINDIR"/../src/java/lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../zookeeper-*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work for developers
+for d in "$ZOOBINDIR"/../build/lib/*.jar
+do
+   CLASSPATH="$d:$CLASSPATH"
+done
+
+#make it work for developers
+CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
+
+case "`uname`" in
+    CYGWIN*) cygwin=true ;;
+    *) cygwin=false ;;
+esac
+
+if $cygwin
+then
+    CLASSPATH=`cygpath -wp "$CLASSPATH"`
+fi
+
+#echo "CLASSPATH=$CLASSPATH"


[10/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_21.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_21.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_21.py
new file mode 100755
index 0000000..be49bf8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_21.py
@@ -0,0 +1,259 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.core.logger import Logger
+
+try:
+  from stack_advisor_206 import *
+except ImportError:
+  #Ignore ImportError
+  print("stack_advisor_206 not found")
+
+class HDP21StackAdvisor(HDP206StackAdvisor):
+
+  def getServiceConfigurationRecommenderDict(self):
+    parentRecommendConfDict = super(HDP21StackAdvisor, self).getServiceConfigurationRecommenderDict()
+    childRecommendConfDict = {
+      "OOZIE": self.recommendOozieConfigurations,
+      "HIVE": self.recommendHiveConfigurations,
+      "TEZ": self.recommendTezConfigurations
+    }
+    parentRecommendConfDict.update(childRecommendConfDict)
+    return parentRecommendConfDict
+
+  def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
+    oozieSiteProperties = getSiteProperties(services['configurations'], 'oozie-site')
+    oozieEnvProperties = getSiteProperties(services['configurations'], 'oozie-env')
+    putOozieProperty = self.putProperty(configurations, "oozie-site", services)
+    putOozieEnvProperty = self.putProperty(configurations, "oozie-env", services)
+
+    if "FALCON_SERVER" in clusterData["components"]:
+      putOozieSiteProperty = self.putProperty(configurations, "oozie-site", services)
+      falconUser = None
+      if "falcon-env" in services["configurations"] and "falcon_user" in services["configurations"]["falcon-env"]["properties"]:
+        falconUser = services["configurations"]["falcon-env"]["properties"]["falcon_user"]
+        if falconUser is not None:
+          putOozieSiteProperty("oozie.service.ProxyUserService.proxyuser.{0}.groups".format(falconUser) , "*")
+          putOozieSiteProperty("oozie.service.ProxyUserService.proxyuser.{0}.hosts".format(falconUser) , "*")
+        falconUserOldValue = getOldValue(self, services, "falcon-env", "falcon_user")
+        if falconUserOldValue is not None:
+          if 'forced-configurations' not in services:
+            services["forced-configurations"] = []
+          putOozieSitePropertyAttribute = self.putPropertyAttribute(configurations, "oozie-site")
+          putOozieSitePropertyAttribute("oozie.service.ProxyUserService.proxyuser.{0}.groups".format(falconUserOldValue), 'delete', 'true')
+          putOozieSitePropertyAttribute("oozie.service.ProxyUserService.proxyuser.{0}.hosts".format(falconUserOldValue), 'delete', 'true')
+          services["forced-configurations"].append({"type" : "oozie-site", "name" : "oozie.service.ProxyUserService.proxyuser.{0}.hosts".format(falconUserOldValue)})
+          services["forced-configurations"].append({"type" : "oozie-site", "name" : "oozie.service.ProxyUserService.proxyuser.{0}.groups".format(falconUserOldValue)})
+          if falconUser is not None:
+            services["forced-configurations"].append({"type" : "oozie-site", "name" : "oozie.service.ProxyUserService.proxyuser.{0}.hosts".format(falconUser)})
+            services["forced-configurations"].append({"type" : "oozie-site", "name" : "oozie.service.ProxyUserService.proxyuser.{0}.groups".format(falconUser)})
+
+      putMapredProperty = self.putProperty(configurations, "oozie-site")
+      putMapredProperty("oozie.services.ext",
+                        "org.apache.oozie.service.JMSAccessorService," +
+                        "org.apache.oozie.service.PartitionDependencyManagerService," +
+                        "org.apache.oozie.service.HCatAccessorService")
+    if oozieEnvProperties and oozieSiteProperties and self.checkSiteProperties(oozieSiteProperties, 'oozie.service.JPAService.jdbc.driver') and self.checkSiteProperties(oozieEnvProperties, 'oozie_database'):
+      putOozieProperty('oozie.service.JPAService.jdbc.driver', self.getDBDriver(oozieEnvProperties['oozie_database']))
+    if oozieSiteProperties and oozieEnvProperties and self.checkSiteProperties(oozieSiteProperties, 'oozie.db.schema.name', 'oozie.service.JPAService.jdbc.url') and self.checkSiteProperties(oozieEnvProperties, 'oozie_database'):
+      oozieServerHost = self.getHostWithComponent('OOZIE', 'OOZIE_SERVER', services, hosts)
+      oozieDBConnectionURL = oozieSiteProperties['oozie.service.JPAService.jdbc.url']
+      protocol = self.getProtocol(oozieEnvProperties['oozie_database'])
+      oldSchemaName = getOldValue(self, services, "oozie-site", "oozie.db.schema.name")
+      # Here we check whether the Oozie Server hostname is available, whether the schema name was changed,
+      # or whether the protocol for the current database type differs from the protocol in the JDBC connection URL (i.e. the database type was changed).
+      if oozieServerHost is not None:
+        if oldSchemaName or (protocol and oozieDBConnectionURL and not oozieDBConnectionURL.startswith(protocol)):
+          dbConnection = self.getDBConnectionString(oozieEnvProperties['oozie_database']).format(oozieServerHost['Hosts']['host_name'], oozieSiteProperties['oozie.db.schema.name'])
+          putOozieProperty('oozie.service.JPAService.jdbc.url', dbConnection)
+
+  def recommendHiveConfigurations(self, configurations, clusterData, services, hosts):
+    hiveSiteProperties = getSiteProperties(services['configurations'], 'hive-site')
+    hiveEnvProperties = getSiteProperties(services['configurations'], 'hive-env')
+    containerSize = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
+    containerSize = min(clusterData['containers'] * clusterData['ramPerContainer'], containerSize)
+    container_size_bytes = int(containerSize)*1024*1024
+    putHiveEnvProperty = self.putProperty(configurations, "hive-env", services)
+    putHiveProperty = self.putProperty(configurations, "hive-site", services)
+    
+    servicesList = self.get_services_list(services)
+    if "TEZ" in servicesList:
+        putHiveProperty('hive.auto.convert.join.noconditionaltask.size', int(round(container_size_bytes / 3)))
+        putHiveProperty('hive.tez.java.opts', "-server -Xmx" + str(int(round((0.8 * containerSize) + 0.5)))
+                    + "m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps")
+        putHiveProperty('hive.tez.container.size', containerSize)
+
+    # javax.jdo.option.ConnectionURL recommendations
+    if hiveEnvProperties and self.checkSiteProperties(hiveEnvProperties, 'hive_database', 'hive_database_type'):
+      putHiveEnvProperty('hive_database_type', self.getDBTypeAlias(hiveEnvProperties['hive_database']))
+    if hiveEnvProperties and hiveSiteProperties and self.checkSiteProperties(hiveSiteProperties, 'javax.jdo.option.ConnectionDriverName') and self.checkSiteProperties(hiveEnvProperties, 'hive_database'):
+      putHiveProperty('javax.jdo.option.ConnectionDriverName', self.getDBDriver(hiveEnvProperties['hive_database']))
+    if hiveSiteProperties and hiveEnvProperties and self.checkSiteProperties(hiveSiteProperties, 'ambari.hive.db.schema.name', 'javax.jdo.option.ConnectionURL') and self.checkSiteProperties(hiveEnvProperties, 'hive_database'):
+      hiveServerHost = self.getHostWithComponent('HIVE', 'HIVE_SERVER', services, hosts)
+      hiveDBConnectionURL = hiveSiteProperties['javax.jdo.option.ConnectionURL']
+      protocol = self.getProtocol(hiveEnvProperties['hive_database'])
+      oldSchemaName = getOldValue(self, services, "hive-site", "ambari.hive.db.schema.name")
+      oldDBType = getOldValue(self, services, "hive-env", "hive_database")
+      # Under these conditions we check whether the Hive server hostname is available,
+      # whether the connection URL is still the default one pointing at "localhost",
+      # whether the schema name was changed, whether the DB type was changed (only for the
+      # change from the default MySQL to an existing MySQL), or whether the protocol for the
+      # current DB type differs from the protocol in the DB connection URL (other DB type changes).
+      if hiveServerHost is not None:
+        if (hiveDBConnectionURL and "//localhost" in hiveDBConnectionURL) or oldSchemaName or oldDBType or (protocol and hiveDBConnectionURL and not hiveDBConnectionURL.startswith(protocol)):
+          dbConnection = self.getDBConnectionString(hiveEnvProperties['hive_database']).format(hiveServerHost['Hosts']['host_name'], hiveSiteProperties['ambari.hive.db.schema.name'])
+          putHiveProperty('javax.jdo.option.ConnectionURL', dbConnection)
+
+    if "PIG" in servicesList:
+        ambari_user = self.getAmbariUser(services)
+        ambariHostName = socket.getfqdn()
+        webHcatSiteProperty = self.putProperty(configurations, "webhcat-site", services)
+        webHcatSiteProperty("webhcat.proxyuser.{0}.hosts".format(ambari_user), ambariHostName)
+        webHcatSiteProperty("webhcat.proxyuser.{0}.groups".format(ambari_user), "*")
+        old_ambari_user = self.getOldAmbariUser(services)
+        if old_ambari_user is not None:
+            webHcatSitePropertyAttributes = self.putPropertyAttribute(configurations, "webhcat-site")
+            webHcatSitePropertyAttributes("webhcat.proxyuser.{0}.hosts".format(old_ambari_user), 'delete', 'true')
+            webHcatSitePropertyAttributes("webhcat.proxyuser.{0}.groups".format(old_ambari_user), 'delete', 'true')
+
+    if self.is_secured_cluster(services):
+      appendCoreSiteProperty = self.updateProperty(configurations, "core-site", services)
+
+      def updateCallback(originalValue, newValue):
+        """
+        :type originalValue str
+        :type newValue list
+        """
+        if originalValue and not originalValue.isspace():
+          hosts = originalValue.split(',')
+
+          if newValue:
+            hosts.extend(newValue)
+
+          result = ','.join(set(hosts))
+          return result
+        else:
+          return ','.join(set(newValue))
+
+      meta = self.get_service_component_meta("HIVE", "WEBHCAT_SERVER", services)
+      if "hostnames" in meta:
+        appendCoreSiteProperty('hadoop.proxyuser.HTTP.hosts', meta["hostnames"], updateCallback)
+
+  def recommendTezConfigurations(self, configurations, clusterData, services, hosts):
+    putTezProperty = self.putProperty(configurations, "tez-site")
+    putTezProperty("tez.am.resource.memory.mb", int(clusterData['amMemory']))
+    putTezProperty("tez.am.java.opts",
+                   "-server -Xmx" + str(int(0.8 * clusterData["amMemory"]))
+                   + "m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC")
+    recommended_tez_queue = self.recommendYarnQueue(services, "tez-site", "tez.queue.name")
+    if recommended_tez_queue is not None:
+      putTezProperty("tez.queue.name", recommended_tez_queue)
+
+
+  def getNotPreferableOnServerComponents(self):
+    return ['STORM_UI_SERVER', 'DRPC_SERVER', 'STORM_REST_API', 'NIMBUS', 'GANGLIA_SERVER', 'METRICS_COLLECTOR']
+
+  def getNotValuableComponents(self):
+    return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR', 'APP_TIMELINE_SERVER']
+
+  def getComponentLayoutSchemes(self):
+    parentSchemes = super(HDP21StackAdvisor, self).getComponentLayoutSchemes()
+    childSchemes = {
+        'APP_TIMELINE_SERVER': {31: 1, "else": 2},
+        'FALCON_SERVER': {6: 1, 31: 2, "else": 3}
+    }
+    parentSchemes.update(childSchemes)
+    return parentSchemes
+
+  def getServiceConfigurationValidators(self):
+    parentValidators = super(HDP21StackAdvisor, self).getServiceConfigurationValidators()
+    childValidators = {
+      "HIVE": {"hive-site": self.validateHiveConfigurations},
+      "TEZ": {"tez-site": self.validateTezConfigurations}
+    }
+    self.mergeValidators(parentValidators, childValidators)
+    return parentValidators
+
+  def validateHiveConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [ 
+                        #{"config-name": 'hive.tez.container.size', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hive.tez.container.size')},
+                        #{"config-name": 'hive.tez.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'hive.tez.java.opts')},
+                        {"config-name": 'hive.auto.convert.join.noconditionaltask.size', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hive.auto.convert.join.noconditionaltask.size')} ]
+    
+    servicesList = self.get_services_list(services)
+    if "TEZ" in servicesList:
+        yarnSiteProperties = getSiteProperties(configurations, "yarn-site")
+        if yarnSiteProperties:
+          yarnSchedulerMaximumAllocationMb = to_number(yarnSiteProperties["yarn.scheduler.maximum-allocation-mb"])
+          hiveTezContainerSize = to_number(properties['hive.tez.container.size'])
+          if hiveTezContainerSize is not None and yarnSchedulerMaximumAllocationMb is not None and hiveTezContainerSize > yarnSchedulerMaximumAllocationMb:
+            validationItems.append({"config-name": 'hive.tez.container.size', "item": self.getWarnItem("hive.tez.container.size is greater than the maximum container size specified in yarn.scheduler.maximum-allocation-mb")})
+    return self.toConfigurationValidationProblems(validationItems, "hive-site")
+
+  def validateTezConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [ {"config-name": 'tez.am.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.am.resource.memory.mb')},
+                        {"config-name": 'tez.am.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'tez.am.java.opts')},
+                        {"config-name": 'tez.queue.name', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'tez.queue.name', services)} ]
+    return self.toConfigurationValidationProblems(validationItems, "tez-site")
+
+  def getDBDriver(self, databaseType):
+    driverDict = {
+      'NEW MYSQL DATABASE': 'com.mysql.jdbc.Driver',
+      'NEW DERBY DATABASE': 'org.apache.derby.jdbc.EmbeddedDriver',
+      'EXISTING MYSQL DATABASE': 'com.mysql.jdbc.Driver',
+      'EXISTING MYSQL / MARIADB DATABASE': 'com.mysql.jdbc.Driver',
+      'EXISTING POSTGRESQL DATABASE': 'org.postgresql.Driver',
+      'EXISTING ORACLE DATABASE': 'oracle.jdbc.driver.OracleDriver',
+      'EXISTING SQL ANYWHERE DATABASE': 'sap.jdbc4.sqlanywhere.IDriver'
+    }
+    return driverDict.get(databaseType.upper())
+
+  def getDBConnectionString(self, databaseType):
+    driverDict = {
+      'NEW MYSQL DATABASE': 'jdbc:mysql://{0}/{1}?createDatabaseIfNotExist=true',
+      'NEW DERBY DATABASE': 'jdbc:derby:${{oozie.data.dir}}/${{oozie.db.schema.name}}-db;create=true',
+      'EXISTING MYSQL DATABASE': 'jdbc:mysql://{0}/{1}',
+      'EXISTING MYSQL / MARIADB DATABASE': 'jdbc:mysql://{0}/{1}',
+      'EXISTING POSTGRESQL DATABASE': 'jdbc:postgresql://{0}:5432/{1}',
+      'EXISTING ORACLE DATABASE': 'jdbc:oracle:thin:@//{0}:1521/{1}',
+      'EXISTING SQL ANYWHERE DATABASE': 'jdbc:sqlanywhere:host={0};database={1}'
+    }
+    return driverDict.get(databaseType.upper())
+
+  def getProtocol(self, databaseType):
+    first_parts_of_connection_string = {
+      'NEW MYSQL DATABASE': 'jdbc:mysql',
+      'NEW DERBY DATABASE': 'jdbc:derby',
+      'EXISTING MYSQL DATABASE': 'jdbc:mysql',
+      'EXISTING MYSQL / MARIADB DATABASE': 'jdbc:mysql',
+      'EXISTING POSTGRESQL DATABASE': 'jdbc:postgresql',
+      'EXISTING ORACLE DATABASE': 'jdbc:oracle',
+      'EXISTING SQL ANYWHERE DATABASE': 'jdbc:sqlanywhere'
+    }
+    return first_parts_of_connection_string.get(databaseType.upper())
+
+  def getDBTypeAlias(self, databaseType):
+    driverDict = {
+      'NEW MYSQL DATABASE': 'mysql',
+      'NEW DERBY DATABASE': 'derby',
+      'EXISTING MYSQL / MARIADB DATABASE': 'mysql',
+      'EXISTING MYSQL DATABASE': 'mysql',
+      'EXISTING POSTGRESQL DATABASE': 'postgres',
+      'EXISTING ORACLE DATABASE': 'oracle',
+      'EXISTING SQL ANYWHERE DATABASE': 'sqla'
+    }
+    return driverDict.get(databaseType.upper())
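
The three lookup tables above feed the JDBC URL recommendation logic earlier in this file. A minimal standalone sketch of how they combine (the host, schema name, and current URL below are hypothetical, and the dicts are trimmed to one entry each; the advisor itself goes through getProtocol/getDBConnectionString):

# Illustrative only -- not part of the stack advisor.
db_type = "Existing PostgreSQL Database"            # hypothetical oozie_database value
connection_templates = {
    'EXISTING POSTGRESQL DATABASE': 'jdbc:postgresql://{0}:5432/{1}',
}
protocols = {
    'EXISTING POSTGRESQL DATABASE': 'jdbc:postgresql',
}

current_url = "jdbc:mysql://old-host/oozie"         # value currently in oozie-site (hypothetical)
protocol = protocols[db_type.upper()]
if not current_url.startswith(protocol):            # protocol mismatch => DB type was changed
    new_url = connection_templates[db_type.upper()].format("oozie-host.example.com", "oozie")
    print(new_url)  # jdbc:postgresql://oozie-host.example.com:5432/oozie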


[12/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkServer.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkServer.sh
new file mode 100755
index 0000000..dd75a58
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkServer.sh
@@ -0,0 +1,120 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# If this script is run out of /usr/bin or some other system bin directory
+# it should be linked to and not copied. Things like java jar files are found
+# relative to the canonical path of this script.
+#
+
+# See the following page for extensive details on setting
+# up the JVM to accept JMX remote management:
+# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# by default we allow local JMX connections
+if [ "x$JMXLOCALONLY" = "x" ]
+then
+    JMXLOCALONLY=false
+fi
+
+if [ "x$JMXDISABLE" = "x" ]
+then
+    echo "JMX enabled by default"
+    # For some reason these two options are necessary on JDK 6 on Ubuntu.
+    #   According to the docs they are not necessary, but otherwise jconsole
+    #   cannot do a local attach.
+    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
+else
+    echo "JMX disabled by user request"
+    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+fi
+
+# Only follow symlinks if readlink supports it
+if readlink -f "$0" > /dev/null 2>&1
+then
+  ZOOBIN=`readlink -f "$0"`
+else
+  ZOOBIN="$0"
+fi
+ZOOBINDIR=`dirname "$ZOOBIN"`
+
+. "$ZOOBINDIR"/zkEnv.sh
+
+if [ "x$2" != "x" ]
+then
+    ZOOCFG="$ZOOCFGDIR/$2"
+fi
+
+if $cygwin
+then
+    ZOOCFG=`cygpath -wp "$ZOOCFG"`
+    # cygwin has a "kill" in the shell itself, gets confused
+    KILL=/bin/kill
+else
+    KILL=kill
+fi
+
+echo "Using config: $ZOOCFG"
+
+ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
+
+
+case $1 in
+start)
+    echo  "Starting zookeeper ... "
+    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
+    /bin/echo -n $! > "$ZOOPIDFILE"
+    echo STARTED
+    ;;
+stop)
+    echo "Stopping zookeeper ... "
+    if [ ! -f "$ZOOPIDFILE" ]
+    then
+    echo "error: could not find file $ZOOPIDFILE"
+    exit 1
+    else
+    $KILL -9 $(cat "$ZOOPIDFILE")
+    rm "$ZOOPIDFILE"
+    echo STOPPED
+    fi
+    ;;
+upgrade)
+    shift
+    echo "upgrading the servers to 3.*"
+    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
+    echo "Upgrading ... "
+    ;;
+restart)
+    shift
+    "$0" stop ${@}
+    sleep 3
+    "$0" start ${@}
+    ;;
+status)
+    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
+    if [ "x$STAT" = "x" ]
+    then
+        echo "Error contacting service. It is probably not running."
+    else
+        echo $STAT
+    fi
+    ;;
+*)
+    echo "Usage: $0 {start|stop|restart|status}" >&2
+
+esac
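
The status branch above uses ZooKeeper's four-letter-word "stat" command on the client port (via nc) and keeps only the Mode line. A minimal Python 3 sketch of the same check, with a hypothetical host and port, in case nc is not available:

import socket

def zk_mode(host="localhost", port=2181):
    """Send the 'stat' four-letter word and return the Mode line, if any."""
    with socket.create_connection((host, port), timeout=5) as sock:
        sock.sendall(b"stat")
        response = sock.makefile().read()   # ZooKeeper closes the connection after replying
    for line in response.splitlines():
        if line.startswith("Mode:"):
            return line
    return None

print(zk_mode() or "Error contacting service. It is probably not running.")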

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkService.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkService.sh
new file mode 100755
index 0000000..59bfe90
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkService.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+zkcli_script=$1
+user=$2
+conf_dir=$3
+/var/lib/ambari-agent/ambari-sudo.sh su $user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkSmoke.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkSmoke.sh
new file mode 100755
index 0000000..d194cc5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/files/zkSmoke.sh
@@ -0,0 +1,93 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smoke_script=$1
+smoke_user=$2
+conf_dir=$3
+client_port=$4
+security_enabled=$5
+kinit_path_local=$6
+smoke_user_keytab=$7
+smokeuser_principal=$8
+export ZOOKEEPER_EXIT_CODE=0
+test_output_file=/tmp/zkSmoke.out
+errors_expr="ERROR|Exception"
+acceptable_expr="SecurityException"
+zkhosts=` grep "^\s*server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
+zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`
+echo "zk_node1=$zk_node1"
+if [[ $security_enabled == "True" ]]; then
+  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smokeuser_principal"
+  /var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "$kinitcmd"
+fi
+
+function verify_output() {
+  if [ -f $test_output_file ]; then
+    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
+    if [ "$?" -eq 0 ]; then
+      echo "Error found in the zookeeper smoke test. Exiting."
+      echo $errors
+      exit 1
+    fi
+  fi
+}
+
+function rename_output(){
+  if [ -f $test_output_file ]; then
+    time=$(date +"%s")
+    output_file=$(echo $test_output_file | cut -f 1 -d '.')
+    errors=`mv $test_output_file $output_file$time.out`
+    if [ "$?" -ne 0 ]; then
+      echo "Error found in the zookeeper smoke test. Exiting."
+      echo $errors
+      exit 1
+    fi
+  fi
+}
+
+# Delete /zk_smoketest znode if exists
+/var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:$client_port" 2>&1>$test_output_file
+# Create /zk_smoketest znode on one zookeeper server
+/var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:$client_port" 2>&1>>$test_output_file
+verify_output
+rename_output
+
+for i in $zkhosts ; do
+  echo "Running test on host $i"
+  # Verify the data associated with znode across all the nodes in the zookeeper quorum
+  /var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port"
+  /var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:$client_port"
+  output=$(/var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port")
+  echo $output | grep smoke_data
+  if [[ $? -ne 0 ]] ; then
+    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
+    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
+  fi
+done
+
+/var/lib/ambari-agent/ambari-sudo.sh su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:$client_port"
+if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
+  echo "Zookeeper Smoke Test: Failed"
+else
+   echo "Zookeeper Smoke Test: Passed"
+fi
+exit $ZOOKEEPER_EXIT_CODE
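
The zkhosts line near the top of this script extracts the quorum member hostnames from zoo.cfg with grep/cut. An illustrative Python equivalent, run against a hypothetical zoo.cfg of the kind rendered by zoo.cfg.j2, shows exactly what it yields:

# Illustrative only.
import re

zoo_cfg = """clientPort=2181
server.1=zk1.example.com:2888:3888
server.2=zk2.example.com:2888:3888
server.3=zk3.example.com:2888:3888
"""
zkhosts = [
    line.split("=", 1)[1].split(":", 1)[0]
    for line in zoo_cfg.splitlines()
    if re.match(r"\s*server\.\d", line)
]
print(zkhosts)     # ['zk1.example.com', 'zk2.example.com', 'zk3.example.com']
print(zkhosts[0])  # zk_node1, the server the smoke znode is created on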

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/__init__.py
new file mode 100755
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/__init__.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/params.py
new file mode 100755
index 0000000..e9b8144
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/params.py
@@ -0,0 +1,96 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+import status_params
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+current_version = default("/hostLevelParams/current_version", None)
+
+#hadoop params
+role_root = "zookeeper-client"
+command_role = default("/role", "")
+
+if command_role == "ZOOKEEPER_SERVER":
+  role_root = "zookeeper-server"
+
+zk_home = format("/usr/iop/current/{role_root}")
+zk_bin = format("/usr/iop/current/{role_root}/bin")
+zk_cli_shell = format("/usr/iop/current/{role_root}/bin/zkCli.sh")
+
+config_dir = "/usr/iop/current/zookeeper-client/conf"
+zk_user =  config['configurations']['zookeeper-env']['zk_user']
+hostname = config['hostname']
+user_group = config['configurations']['cluster-env']['user_group']
+zk_env_sh_template = config['configurations']['zookeeper-env']['content']
+
+zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
+zk_data_dir = config['configurations']['zoo.cfg']['dataDir']
+zk_pid_dir = status_params.zk_pid_dir
+zk_pid_file = status_params.zk_pid_file
+zk_server_heapsize = "-Xmx1024m"
+
+client_port = default('/configurations/zoo.cfg/clientPort', None)
+
+if 'zoo.cfg' in config['configurations']:
+  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
+else:
+  zoo_cfg_properties_map = {}
+zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
+
+zk_principal_name = default("/configurations/zookeeper-env/zookeeper_principal_name", "zookeeper@EXAMPLE.COM")
+zk_principal = zk_principal_name.replace('_HOST',hostname.lower())
+
+java64_home = config['hostLevelParams']['java_home']
+
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_hosts.sort()
+
+zk_keytab_path = config['configurations']['zookeeper-env']['zookeeper_keytab_path']
+zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
+zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+kinit_path_local = functions.get_kinit_path()
+
+#log4j.properties
+if (('zookeeper-log4j' in config['configurations']) and ('content' in config['configurations']['zookeeper-log4j'])):
+  log4j_props = config['configurations']['zookeeper-log4j']['content']
+else:
+  log4j_props = None
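
One detail worth calling out from the security parameters above: the configured principal may contain the _HOST placeholder, which is resolved against the local hostname at runtime. A tiny sketch with hypothetical values:

# Illustrative only -- mirrors the zk_principal line above.
zk_principal_name = "zookeeper/_HOST@EXAMPLE.COM"   # hypothetical configured value
hostname = "ZK1.Example.Com"
zk_principal = zk_principal_name.replace('_HOST', hostname.lower())
print(zk_principal)  # zookeeper/zk1.example.com@EXAMPLE.COM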

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/service_check.py
new file mode 100755
index 0000000..4b3f8d4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/service_check.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class ZookeeperServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    File(format("{tmp_dir}/zkSmoke.sh"),
+         mode=0755,
+         content=StaticFile('zkSmoke.sh')
+    )
+
+    if params.security_enabled:
+      smokeUserKeytab=params.smoke_user_keytab
+      smokeUserPrincipal=params.smokeuser_principal
+    else:
+      smokeUserKeytab= "no_keytab"
+      smokeUserPrincipal="no_principal"
+
+
+    cmd_quorum = format("{tmp_dir}/zkSmoke.sh {zk_cli_shell} {smokeuser} {config_dir} {client_port} "
+                  "{security_enabled} {kinit_path_local} {smokeUserKeytab} {smokeUserPrincipal}")
+
+    Execute(cmd_quorum,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True
+    )
+
+if __name__ == "__main__":
+  ZookeeperServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/status_params.py
new file mode 100755
index 0000000..d7765b2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/status_params.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+# a map of the Ambari role to the component name
+# for use with /usr/iop/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'ZOOKEEPER_SERVER' : 'zookeeper-server',
+  'ZOOKEEPER_CLIENT' : 'zookeeper-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "ZOOKEEPER_CLIENT")
+
+config = Script.get_config()
+
+zk_pid_dir = config['configurations']['zookeeper-env']['zk_pid_dir']
+zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")
+
+# Security related/required params
+hostname = config['hostname']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+kinit_path_local = functions.get_kinit_path()
+tmp_dir = Script.get_tmp_dir()
+config_dir = format("/usr/iop/current/{component_directory}/conf")
+zk_user =  config['configurations']['zookeeper-env']['zk_user']

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper.py
new file mode 100755
index 0000000..d861f6f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper.py
@@ -0,0 +1,114 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+import sys
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+
+
+def zookeeper(type = None, upgrade_type=None):
+  import params
+
+  Directory(params.config_dir,
+            owner=params.zk_user,
+            create_parents=True,
+            group=params.user_group
+  )
+
+  File(format("{config_dir}/zookeeper-env.sh"),
+       content=InlineTemplate(params.zk_env_sh_template),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+
+
+  configFile("zoo.cfg", template_name="zoo.cfg.j2")
+  configFile("configuration.xsl", template_name="configuration.xsl.j2")
+
+  Directory(params.zk_pid_dir,
+            owner=params.zk_user,
+            create_parents=True,
+            group=params.user_group
+  )
+
+  Directory(params.zk_log_dir,
+            owner=params.zk_user,
+            create_parents=True,
+            group=params.user_group
+  )
+
+  Directory(params.zk_data_dir,
+            owner=params.zk_user,
+            create_parents=True,
+            cd_access="a",
+            group=params.user_group
+  )
+
+  if type == 'server':
+    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
+
+    File(format("{zk_data_dir}/myid"),
+         mode = 0644,
+         content = myid
+    )
+    # This path may be missing after Ambari upgrade. We need to create it.
+    if (upgrade_type == "rolling") and (not os.path.exists("/usr/iop/current/zookeeper-server")) and params.current_version:
+      conf_select.select(params.stack_name, "zookeeper", params.current_version)
+      stack_select.select("zookeeper-server", params.version)
+      #Execute(format("stack-select set zookeeper-server {version}"))
+
+  if params.log4j_props is not None:
+    File(format("{params.config_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.zk_user,
+         content=params.log4j_props
+    )
+  elif (os.path.exists(format("{params.config_dir}/log4j.properties"))):
+    File(format("{params.config_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.zk_user
+    )
+
+  if params.security_enabled:
+    if type == "server":
+      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
+      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
+    else:
+      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
+
+  File(format("{config_dir}/zoo_sample.cfg"),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+
+
+def configFile(name, template_name=None):
+  import params
+
+  File(format("{config_dir}/{name}"),
+       content=Template(template_name),
+       owner=params.zk_user,
+       group=params.user_group
+  )
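
The myid written in the 'server' branch is simply the 1-based position of the local host in the sorted quorum list, so it lines up with the server.N entries rendered into zoo.cfg. A worked example with hypothetical hosts:

# Illustrative only -- mirrors the myid computation above.
zookeeper_hosts = ["zk3.example.com", "zk1.example.com", "zk2.example.com"]
hostname = "zk2.example.com"
myid = str(sorted(zookeeper_hosts).index(hostname) + 1)
print(myid)  # "2" -- matches server.2 in the rendered zoo.cfg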

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper_client.py
new file mode 100755
index 0000000..80ca08a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper_client.py
@@ -0,0 +1,71 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.format import format
+
+from zookeeper import zookeeper
+
+class ZookeeperClient(Script):
+
+  def get_component_name(self):
+    return "zookeeper-client"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    zookeeper(type='client')
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "zookeeper", params.version)
+      stack_select.select("zookeeper-client", params.version)
+      #Execute(format("stack-select set zookeeper-client {version}"))
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    pass
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    pass
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  ZookeeperClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
new file mode 100755
index 0000000..ccba9ce
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper_server.py
@@ -0,0 +1,161 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import random
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_JAAS_CONF
+from resource_management.core.shell import call
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.validate import call_and_match_output
+from zookeeper import zookeeper
+from zookeeper_service import zookeeper_service
+
+
+class ZookeeperServer(Script):
+
+  def get_component_name(self):
+    return "zookeeper-server"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    zookeeper(type='server', upgrade_type=upgrade_type)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "zookeeper", params.version)
+      stack_select.select("zookeeper-server", params.version)
+      #Execute(format("stack-select set zookeeper-server {version}"))
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env, upgrade_type)
+    zookeeper_service(action = 'start', upgrade_type=upgrade_type)
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    if upgrade_type == "nonrolling":
+      return
+
+    Logger.info("Executing Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+    zk_server_host = random.choice(params.zookeeper_hosts)
+    cli_shell = format("{zk_cli_shell} -server {zk_server_host}:{client_port}")
+    # Ensure that a quorum is still formed.
+    unique = get_unique_id_and_date()
+    create_command = format("echo 'create /{unique} mydata' | {cli_shell}")
+    list_command = format("echo 'ls /' | {cli_shell}")
+    delete_command = format("echo 'delete /{unique} ' | {cli_shell}")
+
+    quorum_err_message = "Failed to establish zookeeper quorum"
+    call_and_match_output(create_command, 'Created', quorum_err_message)
+    call_and_match_output(list_command, r"\[.*?" + unique + ".*?\]", quorum_err_message)
+    call(delete_command)
+
+    if params.client_port:
+      check_leader_command = format("echo stat | nc localhost {client_port} | grep Mode")
+      code, out = call(check_leader_command, logoutput=False)
+      if code == 0 and out:
+        Logger.info(out)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    zookeeper_service(action = 'stop', upgrade_type=upgrade_type)
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.zk_pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    if status_params.security_enabled:
+      # Expect the following files to be available in status_params.config_dir:
+      #   zookeeper_jaas.conf
+      #   zookeeper_client_jaas.conf
+      try:
+        props_value_check = None
+        props_empty_check = ['Server/keyTab', 'Server/principal']
+        props_read_check = ['Server/keyTab']
+        zk_env_expectations = build_expectations('zookeeper_jaas', props_value_check, props_empty_check,
+                                                 props_read_check)
+
+        zk_expectations = {}
+        zk_expectations.update(zk_env_expectations)
+
+        security_params = get_params_from_filesystem(status_params.config_dir,
+                                                   {'zookeeper_jaas.conf': FILE_TYPE_JAAS_CONF})
+
+        result_issues = validate_security_config_properties(security_params, zk_expectations)
+        if not result_issues:  # If all validations passed successfully
+          # Double check the dict before calling execute
+          if ( 'zookeeper_jaas' not in security_params
+               or 'Server' not in security_params['zookeeper_jaas']
+               or 'keyTab' not in security_params['zookeeper_jaas']['Server']
+               or 'principal' not in security_params['zookeeper_jaas']['Server']):
+            self.put_structured_out({"securityState": "ERROR"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.zk_user,
+                                security_params['zookeeper_jaas']['Server']['keyTab'],
+                                security_params['zookeeper_jaas']['Server']['principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        else:
+          issues = []
+          for cf in result_issues:
+            issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+          self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+          self.put_structured_out({"securityState": "UNSECURED"})
+      except Exception as e:
+        self.put_structured_out({"securityState": "ERROR"})
+        self.put_structured_out({"securityStateErrorInfo": str(e)})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+if __name__ == "__main__":
+  ZookeeperServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper_service.py
new file mode 100755
index 0000000..1dc24cd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/scripts/zookeeper_service.py
@@ -0,0 +1,58 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+
+def zookeeper_service(action='start', upgrade_type=None):
+  import params
+
+  # This path may be missing after Ambari upgrade. We need to create it.
+  if upgrade_type is None and not os.path.exists("/usr/iop/current/zookeeper-server") and params.current_version \
+    and compare_versions(format_stack_version(params.version), '4.1.0.0') >= 0:
+    conf_select.select(params.stack_name, "zookeeper", params.current_version)
+    stack_select.select("zookeeper-server", params.version)
+
+  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
+
+  if action == 'start':
+    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
+    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps -p `cat {zk_pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            not_if=no_op_test,
+            user=params.zk_user
+    )
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+
+      Execute(kinit_cmd,
+              user=params.smokeuser
+      )
+
+  elif action == 'stop':
+    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
+    rm_pid = format("rm -f {zk_pid_file}")
+    Execute(daemon_cmd,
+            user=params.zk_user
+    )
+    Execute(rm_pid)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/configuration.xsl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/configuration.xsl.j2
new file mode 100755
index 0000000..8830c45
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/configuration.xsl.j2
@@ -0,0 +1,42 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+<?xml version="1.0"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+  <tr>
+     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+     <td><xsl:value-of select="value"/></td>
+     <td><xsl:value-of select="description"/></td>
+  </tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/zoo.cfg.j2
new file mode 100755
index 0000000..1fbc0cc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/zoo.cfg.j2
@@ -0,0 +1,53 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+{% for key, value in zoo_cfg_properties_map.iteritems() -%}
+  {{key}}={{value}}
+{% endfor %}
+{% for host in zookeeper_hosts -%}
+server.{{loop.index}}={{host}}:2888:3888
+{% endfor %}
+
+{% if security_enabled -%}
+authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+jaasLoginRenew=3600000
+kerberos.removeHostFromPrincipal=true
+kerberos.removeRealmFromPrincipal=true
+{% endif %}
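
For illustration, a minimal Python 3 sketch of how this template expands for a hypothetical three-node ensemble (jinja2 assumed installed; .items() is used in place of the template's Python 2 .iteritems(), and the license header and whitespace-control markers are omitted):

from jinja2 import Template

zoo_cfg = Template(
    "{% for key, value in zoo_cfg_properties_map.items() %}{{ key }}={{ value }}\n{% endfor %}"
    "{% for host in zookeeper_hosts %}server.{{ loop.index }}={{ host }}:2888:3888\n{% endfor %}"
)
print(zoo_cfg.render(
    zoo_cfg_properties_map={"clientPort": 2181, "dataDir": "/hadoop/zookeeper"},
    zookeeper_hosts=["zk1.example.com", "zk2.example.com", "zk3.example.com"],
))
# clientPort=2181
# dataDir=/hadoop/zookeeper
# server.1=zk1.example.com:2888:3888
# server.2=zk2.example.com:2888:3888
# server.3=zk3.example.com:2888:3888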

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
new file mode 100755
index 0000000..38f9721
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
new file mode 100755
index 0000000..c3e9505
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Server {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{zk_keytab_path}}"
+principal="{{zk_principal}}";
+};
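
Both JAAS templates only matter when security_enabled is true; the service scripts render them into the ZooKeeper config directory and the JVM is typically pointed at the result via -Djava.security.auth.login.config. A minimal sketch of that materialization step follows; the function name, paths and parameters are illustrative assumptions, and File/Template only work inside an active Ambari agent command:

# Sketch only: how a service script typically writes these templates out.
# Names and paths below are assumptions for illustration.
from resource_management.core.resources.system import File
from resource_management.core.source import Template
from resource_management.libraries.functions.format import format

def write_zookeeper_jaas_files(config_dir, zk_user, user_group, security_enabled):
  if not security_enabled:
    return
  # server-side JAAS, referenced by the ZooKeeper server JVM
  File(format("{config_dir}/zookeeper_jaas.conf"),
       owner=zk_user,
       group=user_group,
       content=Template("zookeeper_jaas.conf.j2"))
  # client-side JAAS, used by zkCli and other clients on the host
  File(format("{config_dir}/zookeeper_client_jaas.conf"),
       owner=zk_user,
       group=user_group,
       content=Template("zookeeper_client_jaas.conf.j2"))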

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/stack_advisor.py
new file mode 100755
index 0000000..f049acf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/stack_advisor.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from stack_advisor_25 import *
+
+class BigInsights40StackAdvisor(HDP25StackAdvisor):
+
+  pass
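
The skeleton advisor above inherits everything from HDP25StackAdvisor; a real advisor would override the recommender hooks. A sketch of what such an override could look like, assuming the same import as above; the Hive hook and the fixed heap value are purely illustrative:

# Illustrative only: a stack advisor that overrides one recommendation hook.
class BigInsights40StackAdvisorExample(HDP25StackAdvisor):

  def getServiceConfigurationRecommenderDict(self):
    parent = super(BigInsights40StackAdvisorExample, self).getServiceConfigurationRecommenderDict()
    parent["HIVE"] = self.recommendHIVEConfigurations
    return parent

  def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
    put_hive_site = self.putProperty(configurations, "hive-site", services)
    # example value; a real advisor would derive this from clusterData
    put_hive_site("hive.metastore.heapsize", "2048")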


http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_client.py
new file mode 100755
index 0000000..8d586da
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_client.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from hive import hive
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+class HiveClient(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+    self.configure(env)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hive(name='client')
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveClientDefault(HiveClient):
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    """
+    Execute conf-select/stack-select before reconfiguring this client to the new stack version.
+
+    :param env:
+    :param upgrade_type:
+    :return:
+    """
+    Logger.info("Executing Hive Client Stack Upgrade pre-restart")
+
+    import params
+    env.set_params(params)
+
+    # this function should not execute if the version can't be determined or
+    # is older than BigInsights 4.1.0.0
+    if not params.version or compare_versions(format_stack_version(params.version), '4.1.0.0') < 0:
+      return
+
+    conf_select.select(params.stack_name, "hive", params.version)
+    conf_select.select(params.stack_name, "hadoop", params.version)
+    stack_select.select("hadoop-client", params.version)
+
+    # HCat client doesn't have a first-class entry in stack-select. Since clients always
+    # update after daemons, this ensures that the hcat directories are correct on hosts
+    # which do not include the WebHCat daemon
+    stack_select.select("hive-webhcat", params.version)
+
+if __name__ == "__main__":
+  HiveClient().execute()
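
The @OsFamilyImpl decorator used above selects which subclass of HiveClient actually services a command, based on the agent's OS family. A simplified, self-contained illustration of that dispatch idea; this is not the real ambari_commons implementation:

# Simplified illustration of OS-family dispatch; the real logic lives in
# ambari_commons.os_family_impl and is more involved than this.
import platform

_IMPLS = {}

def os_family_impl(os_family):
  def register(cls):
    _IMPLS[os_family] = cls
    return cls
  return register

def resolve(default_family="default"):
  family = "winsrv" if platform.system() == "Windows" else "default"
  return _IMPLS.get(family, _IMPLS[default_family])

@os_family_impl("default")
class ExampleClientLinux(object):
  def install(self):
    print("installing via the package manager")

@os_family_impl("winsrv")
class ExampleClientWindows(object):
  def install(self):
    print("installing via MSI")

resolve()().install()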

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_metastore.py
new file mode 100755
index 0000000..0a9a066
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_metastore.py
@@ -0,0 +1,199 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.security_commons import build_expectations
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.libraries.functions.security_commons import get_params_from_filesystem
+from resource_management.libraries.functions.security_commons import validate_security_config_properties
+from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
+from resource_management.core.resources.system import File
+
+from hive import hive
+from hive import jdbc_connector
+from hive_service import hive_service
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+# the legacy conf.server location in IOP
+LEGACY_HIVE_SERVER_CONF = "/etc/hive/conf.server"
+
+class HiveMetastore(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)  # FOR SECURITY
+    hive_service('metastore', action='start', upgrade_type=upgrade_type)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    hive_service('metastore', action='stop', upgrade_type=upgrade_type)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hive(name = 'metastore')
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveMetastoreDefault(HiveMetastore):
+  def get_component_name(self):
+    return "hive-metastore"
+
+  def status(self, env):
+    import status_params
+    from resource_management.libraries.functions import check_process_status
+
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    # Check the Hive Metastore pid file
+    check_process_status(pid_file)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Metastore Rolling Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if Script.is_stack_greater_or_equal("4.1.0.0"):
+      self.upgrade_schema(env)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hive", params.version)
+      stack_select.select("hive-metastore", params.version)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hive.server2.authentication": "KERBEROS",
+                           "hive.metastore.sasl.enabled": "true",
+                           "hive.security.authorization.enabled": "true"}
+      props_empty_check = ["hive.metastore.kerberos.keytab.file",
+                           "hive.metastore.kerberos.principal"]
+
+      props_read_check = ["hive.metastore.kerberos.keytab.file"]
+      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
+                                            props_read_check)
+
+      hive_expectations ={}
+      hive_expectations.update(hive_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
+                                                   {'hive-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hive_expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if 'hive-site' not in security_params \
+            or 'hive.metastore.kerberos.keytab.file' not in security_params['hive-site'] \
+            or 'hive.metastore.kerberos.principal' not in security_params['hive-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hive_user,
+                                security_params['hive-site']['hive.metastore.kerberos.keytab.file'],
+                                security_params['hive-site']['hive.metastore.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+  def upgrade_schema(self, env):
+    """
+    Executes the schema upgrade binary.  This is its own function because it could
+    be called as a standalone task from the upgrade pack, but it is safe to run for each
+    metastore instance.
+
+    The metastore schema upgrade requires a database driver library for most
+    databases. During an upgrade, it's possible that the library is not present,
+    so this will also attempt to copy/download the appropriate driver.
+    """
+    Logger.info("Upgrading Hive Metastore")
+    import params
+    env.set_params(params)
+
+    if params.security_enabled:
+      kinit_command=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
+      Execute(kinit_command,user=params.smokeuser)
+
+    # ensure that the JDBC driver is present for the schema tool; if it's not
+    # present, then download it first
+    if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
+      source = format("/usr/iop/{current_version}/hive/lib/{jdbc_jar_name}")
+      target_directory = format("/usr/iop/{version}/hive/lib")
+      if not os.path.exists(source):
+        # download it
+        jdbc_connector()
+
+      Execute(('cp', source, target_directory),
+        path=["/bin", "/usr/bin/"], sudo = True)
+
+      File(os.path.join(target_directory, os.path.basename(source)),
+        mode = 0644,
+      )
+
+    # build the schema tool command
+    binary = format("/usr/iop/{version}/hive/bin/schematool")
+
+    # the conf.server directory changed locations
+    # since the configurations have not been written out yet during an upgrade
+    # we need to choose the original legacy location
+    schematool_hive_server_conf_dir = params.hive_server_conf_dir
+    if params.current_version is not None:
+      current_version = format_stack_version(params.current_version)
+      if compare_versions(current_version, "4.1.0.0") < 0:
+        schematool_hive_server_conf_dir = LEGACY_HIVE_SERVER_CONF
+
+    env_dict = {
+      'HIVE_CONF_DIR': schematool_hive_server_conf_dir
+    }
+
+    command = format("{binary} -dbType {hive_metastore_db_type} -upgradeSchema")
+    Execute(command, user=params.hive_user, tries=1, environment=env_dict, logoutput=True)
+
+if __name__ == "__main__":
+  HiveMetastore().execute()
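
upgrade_schema() ultimately shells out to Hive's schematool with HIVE_CONF_DIR pointed at whichever conf.server directory is valid for the source version. A standalone sketch of that invocation; the version, db type and conf dir are sample values, and the real script additionally runs as the hive user and handles the JDBC driver copy shown above:

# Sketch: run the metastore schema upgrade the way upgrade_schema() does, but
# without resource_management. Inputs are illustrative; the real script also
# runs this as the hive user.
import os
import subprocess

def upgrade_hive_schema(stack_version, db_type, conf_dir):
  binary = "/usr/iop/{0}/hive/bin/schematool".format(stack_version)
  env = dict(os.environ, HIVE_CONF_DIR=conf_dir)
  # schematool exits non-zero on failure; check_call turns that into an exception
  subprocess.check_call([binary, "-dbType", db_type, "-upgradeSchema"], env=env)

# upgrade_hive_schema("4.1.0.0", "mysql", "/etc/hive/conf.server")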

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_server.py
new file mode 100755
index 0000000..5d2dc9f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_server.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from ambari_commons import OSCheck, OSConst
+if OSCheck.is_windows_family():
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons.constants import UPGRADE_TYPE_ROLLING
+from resource_management.core.logger import Logger
+
+import hive_server_upgrade
+from hive import hive
+from hive_service import hive_service
+
+
+class HiveServer(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hive(name='hiveserver2')
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HiveServerDefault(HiveServer):
+  def get_component_name(self):
+    return "hive-server2"
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+
+    hive_service( 'hiveserver2', action = 'start', upgrade_type=upgrade_type)
+
+    # only perform this if upgrading and rolling; a non-rolling upgrade doesn't need
+    # to do this since hive is already down
+    if upgrade_type == UPGRADE_TYPE_ROLLING:
+      hive_server_upgrade.post_upgrade_deregister()
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if upgrade_type != UPGRADE_TYPE_ROLLING:
+      hive_service( 'hiveserver2', action = 'stop' )
+    #else:
+    #  hive_server_upgrade.pre_upgrade_deregister()
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+
+    # Check the HiveServer2 pid file
+    check_process_status(pid_file)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing HiveServer2 Rolling Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hive", params.version)
+      stack_select.select("hive-server2", params.version)
+      #Execute(format("stack-select set hive-server2 {version}"))
+      resource_created = copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user)
+      if resource_created:
+        params.HdfsResource(None, action="execute")
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hive.server2.authentication": "KERBEROS",
+                           "hive.metastore.sasl.enabled": "true",
+                           "hive.security.authorization.enabled": "true"}
+      props_empty_check = ["hive.server2.authentication.kerberos.keytab",
+                           "hive.server2.authentication.kerberos.principal",
+                           "hive.server2.authentication.spnego.principal",
+                           "hive.server2.authentication.spnego.keytab"]
+
+      props_read_check = ["hive.server2.authentication.kerberos.keytab",
+                          "hive.server2.authentication.spnego.keytab"]
+      hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
+                                            props_read_check)
+
+      hive_expectations ={}
+      hive_expectations.update(hive_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hive_conf_dir,
+                                                   {'hive-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hive_expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if 'hive-site' not in security_params \
+            or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
+            or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site']  \
+            or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
+            or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hive_user,
+                                security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
+                                security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hive_user,
+                                security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
+                                security_params['hive-site']['hive.server2.authentication.spnego.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+if __name__ == "__main__":
+  HiveServer().execute()
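
security_status() reduces to three kinds of checks over hive-site, followed by a cached kinit with the configured keytab and principal: exact-value properties, must-not-be-empty properties, and keytab files that must be readable. The helper below is a plain-Python illustration of those checks, not the actual security_commons implementation:

# Conceptual sketch of the hive-site checks behind security_status(); the real
# helpers live in resource_management.libraries.functions.security_commons.
import os

def check_hive_site(hive_site, value_checks, non_empty_keys, readable_file_keys):
  issues = []
  for prop, expected in value_checks.items():
    if str(hive_site.get(prop, "")).lower() != expected.lower():
      issues.append("%s should be %s" % (prop, expected))
  for prop in non_empty_keys:
    if not hive_site.get(prop):
      issues.append("%s is not set" % prop)
  for prop in readable_file_keys:
    path = hive_site.get(prop)
    if path and not os.access(path, os.R_OK):
      issues.append("%s is not readable: %s" % (prop, path))
  return issues

print(check_hive_site(
  {"hive.server2.authentication": "KERBEROS",
   "hive.metastore.sasl.enabled": "true",
   "hive.server2.authentication.kerberos.keytab": "/etc/security/keytabs/hive.service.keytab"},
  value_checks={"hive.server2.authentication": "KERBEROS",
                "hive.metastore.sasl.enabled": "true"},
  non_empty_keys=["hive.server2.authentication.kerberos.principal"],
  readable_file_keys=["hive.server2.authentication.kerberos.keytab"]))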

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_server_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_server_upgrade.py
new file mode 100755
index 0000000..318fcca
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_server_upgrade.py
@@ -0,0 +1,174 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core import shell
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version import compare_versions
+
+
+def pre_upgrade_deregister():
+  """
+  Runs the "hive --service hiveserver2 --deregister <version>" command to
+  de-provision the server in preparation for an upgrade. This will contact
+  ZooKeeper to remove the server so that clients that attempt to connect
+  will be directed to other servers automatically. Once all
+  clients have drained, the server will shutdown automatically; this process
+  could take a very long time.
+  This function will obtain the Kerberos ticket if security is enabled.
+  :return:
+  """
+  import params
+
+  Logger.info('HiveServer2 executing "deregister" command in preparation for upgrade...')
+
+  if params.security_enabled:
+    kinit_command=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
+    Execute(kinit_command,user=params.smokeuser)
+
+  # calculate the current hive server version
+  current_hiveserver_version = _get_current_hiveserver_version()
+  if current_hiveserver_version is None:
+    raise Fail('Unable to determine the current HiveServer2 version to deregister.')
+
+  # fallback when upgrading because /usr/iop/current/hive-server2/conf/conf.server may not exist
+  hive_server_conf_dir = params.hive_server_conf_dir
+  if not os.path.exists(hive_server_conf_dir):
+    hive_server_conf_dir = "/etc/hive/conf.server"
+
+  # deregister
+  hive_execute_path = params.execute_path
+  # If upgrading, the upgrade-target hive binary should be used to call the --deregister command.
+  # If downgrading, the downgrade-source hive binary should be used to call the --deregister command.
+  if "upgrade" == params.upgrade_direction:
+    # hive_bin
+    upgrade_target_version = format_stack_version(params.version)
+    if upgrade_target_version and compare_versions(upgrade_target_version, "4.1.0.0") >= 0:
+      upgrade_target_hive_bin = format('/usr/iop/{version}/hive/bin')
+      if (os.pathsep + params.hive_bin) in hive_execute_path:
+        hive_execute_path = hive_execute_path.replace(os.pathsep + params.hive_bin, os.pathsep + upgrade_target_hive_bin)
+    # hadoop_bin_dir
+    upgrade_target_hadoop_bin = stack_select.get_hadoop_dir("bin", upgrade_stack_only=True)
+    upgrade_source_hadoop_bin = params.hadoop_bin_dir
+    if upgrade_target_hadoop_bin and len(upgrade_target_hadoop_bin) > 0 and (os.pathsep + upgrade_source_hadoop_bin) in hive_execute_path:
+      hive_execute_path = hive_execute_path.replace(os.pathsep + upgrade_source_hadoop_bin, os.pathsep + upgrade_target_hadoop_bin)
+
+  command = format('hive --config {hive_server_conf_dir} --service hiveserver2 --deregister ' + current_hiveserver_version)
+  Execute(command, user=params.hive_user, path=hive_execute_path, tries=1 )
+
+
+def _get_current_hiveserver_version():
+  """
+  Runs "hive --version" and parses the result in order
+  to obtain the current version of hive.
+
+  :return:  the hiveserver2 version, returned by "hive --version"
+  """
+  import params
+
+  try:
+    command = 'hive --version'
+    return_code, iop_output = shell.call(command, user=params.hive_user, path=params.execute_path)
+  except Exception, e:
+    Logger.error(str(e))
+    raise Fail('Unable to execute hive --version command to retrieve the hiveserver2 version.')
+
+  if return_code != 0:
+    raise Fail('Unable to determine the current HiveServer2 version because of a non-zero return code of {0}'.format(str(return_code)))
+
+  match = re.search('^(Hive) ([0-9]+.[0-9]+.\S+)', iop_output, re.MULTILINE)
+
+  if match:
+    current_hive_server_version = match.group(2)
+    return current_hive_server_version
+  else:
+    raise Fail('The extracted hiveserver2 version "{0}" does not match any known pattern'.format(iop_output))
+
+def post_upgrade_deregister():
+  """
+  Runs the "hive --service hiveserver2 --deregister <version>" command to
+  de-provision the old server instance after an upgrade has completed. This will contact
+  ZooKeeper to remove the server so that clients that attempt to connect
+  will be directed to other servers automatically. Once all
+  clients have drained, the server will shutdown automatically; this process
+  could take a very long time.
+  This function will obtain the Kerberos ticket if security is enabled.
+  :return:
+  """
+  import params
+
+  Logger.info('HiveServer2 executing "deregister" command to complete upgrade...')
+
+  if params.security_enabled:
+    kinit_command=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
+    Execute(kinit_command,user=params.smokeuser)
+
+  # calculate the current hive server version
+  current_hiveserver_version = _get_current_hiveserver_version()
+  if current_hiveserver_version is None:
+    raise Fail('Unable to determine the current HiveServer2 version to deregister.')
+
+  # fallback when upgrading because /usr/iop/current/hive-server2/conf/conf.server may not exist
+  hive_server_conf_dir = params.hive_server_conf_dir
+  if not os.path.exists(hive_server_conf_dir):
+    hive_server_conf_dir = "/etc/hive/conf.server"
+
+  # deregister
+  hive_execute_path = params.execute_path
+  # If upgrading, the upgrade-target hive binary should be used to call the --deregister command.
+  # If downgrading, the downgrade-source hive binary should be used to call the --deregister command.
+  # By now stack-select has been called to set 'current' to the target stack
+  if "downgrade" == params.upgrade_direction:
+    # hive_bin
+    downgrade_version = params.current_version
+    if params.downgrade_from_version:
+      downgrade_version = params.downgrade_from_version
+    hive_execute_path = _get_hive_execute_path(downgrade_version)
+
+  command = format('hive --config {hive_server_conf_dir} --service hiveserver2 --deregister ' + current_hiveserver_version)
+  Execute(command, user=params.hive_user, path=hive_execute_path, tries=1 )
+
+def _get_hive_execute_path(stack_version):
+  """
+  Returns the exact execute path to use for the given stack-version.
+  This method does not return the "current" path
+  :param stack_version: Exact stack-version to use in the new path
+  :return: Hive execute path for the exact stack version
+  """
+  import params
+
+  hive_execute_path = params.execute_path
+  formatted_stack_version = format_stack_version(stack_version)
+  if formatted_stack_version and compare_versions(formatted_stack_version, "4.1") >= 0:
+    # hive_bin
+    new_hive_bin = format('/usr/iop/{stack_version}/hive/bin')
+    if (os.pathsep + params.hive_bin) in hive_execute_path:
+      hive_execute_path = hive_execute_path.replace(os.pathsep + params.hive_bin, os.pathsep + new_hive_bin)
+    # hadoop_bin_dir
+    new_hadoop_bin = stack_select.get_hadoop_dir_for_stack_version("bin", stack_version)
+    old_hadoop_bin = params.hadoop_bin_dir
+    if new_hadoop_bin and len(new_hadoop_bin) > 0 and (os.pathsep + old_hadoop_bin) in hive_execute_path:
+      hive_execute_path = hive_execute_path.replace(os.pathsep + old_hadoop_bin, os.pathsep + new_hadoop_bin)
+  return hive_execute_path
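
The deregister logic depends on extracting the running server's exact version from `hive --version`. A standalone sketch of that parse against a made-up sample of the output (the dots are escaped here for strictness):

# Sketch of the parse done by _get_current_hiveserver_version(), using a
# fabricated sample of `hive --version` output.
import re

sample_output = """Hive 1.2.1-IBM-12
Subversion git://example/hive -r 0000000
Compiled by jenkins on Mon Jun 26 12:00:00 UTC 2017"""

match = re.search(r'^(Hive) ([0-9]+\.[0-9]+\.\S+)', sample_output, re.MULTILINE)
if match:
  print(match.group(2))   # -> 1.2.1-IBM-12
else:
  raise RuntimeError("unrecognized hive --version output")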

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_service.py
new file mode 100755
index 0000000..c3bf30c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/hive_service.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import time
+
+from ambari_commons.constants import UPGRADE_TYPE_ROLLING
+from resource_management.core import shell
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import File, Execute
+from resource_management.core.resources.service import Service
+from resource_management.core.exceptions import Fail
+from resource_management.core.shell import as_user
+from resource_management.libraries.functions.hive_check import check_thrift_port_sasl
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def hive_service(name, action='start', upgrade_type=None):
+  import params
+  if name == 'metastore':
+    if action == 'start' or action == 'stop':
+      Service(params.hive_metastore_win_service_name, action=action)
+
+  if name == 'hiveserver2':
+    if action == 'start' or action == 'stop':
+      Service(params.hive_server_win_service_name, action=action)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def hive_service(name, action='start', upgrade_type=None):
+
+  import params
+
+  if name == 'metastore':
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    cmd = format("{start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir} {hive_log_dir}")
+  elif name == 'hiveserver2':
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    cmd = format("{start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir} {hive_log_dir}")
+
+  pid_expression = "`" + as_user(format("cat {pid_file}"), user=params.hive_user) + "`"
+  process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid_expression} >/dev/null 2>&1")
+
+  if action == 'start':
+    if name == 'hiveserver2':
+      check_fs_root()
+
+    daemon_cmd = cmd
+    hadoop_home = params.hadoop_home
+    hive_bin = "hive"
+
+    # upgrading hiveserver2 (rolling_restart) means that there is an existing,
+    # de-registering hiveserver2; the pid will still exist, but the new
+    # hiveserver is spinning up on a new port, so the pid will be re-written
+    if upgrade_type == UPGRADE_TYPE_ROLLING:
+      process_id_exists_command = None
+
+      if (params.version):
+        import os
+        hadoop_home = format("/usr/iop/{version}/hadoop")
+        hive_bin = os.path.join(params.hive_bin, hive_bin)
+
+    if params.security_enabled:
+      hive_kinit_cmd = format("{kinit_path_local} -kt {hive_server2_keytab} {hive_principal}; ")
+      Execute(hive_kinit_cmd, user=params.hive_user)
+
+    Execute(daemon_cmd,
+      user = params.hive_user,
+      environment = { 'HADOOP_HOME': hadoop_home, 'JAVA_HOME': params.java64_home, 'HIVE_BIN': hive_bin },
+      path = params.execute_path,
+      not_if = process_id_exists_command)
+
+    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or \
+       params.hive_jdbc_driver == "org.postgresql.Driver" or \
+       params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+
+      db_connection_check_command = format(
+        "{java64_home}/bin/java -cp {check_db_connection_jar}:{target} org.apache.ambari.server.DBConnectionVerification '{hive_jdbc_connection_url}' {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_jdbc_driver}")
+
+      Execute(db_connection_check_command,
+              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10)
+  elif action == 'stop':
+
+    daemon_kill_cmd = format("{sudo} kill {pid_expression}")
+    daemon_hard_kill_cmd = format("{sudo} kill -9 {pid_expression}")
+
+    Execute(daemon_kill_cmd,
+      not_if = format("! ({process_id_exists_command})")
+    )
+
+    wait_time = 5
+    Execute(daemon_hard_kill_cmd,
+      not_if = format("( sleep {wait_time} && ! ({process_id_exists_command}) )")
+    )
+
+    # check if stopped the process, else fail the task
+    Execute(format("! ({process_id_exists_command})"),
+      tries=20,
+      try_sleep=3,
+    )
+
+    File(pid_file,
+         action = "delete"
+    )
+
+def check_fs_root():
+  import params
+  metatool_cmd = format("hive --config {hive_server_conf_dir} --service metatool")
+  cmd = as_user(format("{metatool_cmd} -listFSRoot", env={'PATH': params.execute_path}), params.hive_user) \
+        + format(" 2>/dev/null | grep hdfs:// | cut -f1,2,3 -d '/' | grep -v '{fs_root}' | head -1")
+  code, out = shell.call(cmd)
+
+  if code == 0 and out.strip() != "" and params.fs_root.strip() != out.strip():
+    out = out.strip()
+    cmd = format("{metatool_cmd} -updateLocation {fs_root} {out}")
+    Execute(cmd,
+            user=params.hive_user,
+            environment={'PATH': params.execute_path}
+    )
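
Both the start and stop paths rely on the same liveness idiom: the daemon counts as running only when the pid file exists and `ps -p` still sees that pid. A Python equivalent of that shell test; the pid file path in the usage line is a sample value:

# Python equivalent of the "ls {pid_file} && ps -p `cat {pid_file}`" test
# used to guard start/stop in hive_service().
import errno
import os

def daemon_is_running(pid_file):
  try:
    with open(pid_file) as f:
      pid = int(f.read().strip())
  except (IOError, ValueError):
    return False
  try:
    os.kill(pid, 0)                      # signal 0 only probes for existence
    return True
  except OSError as e:
    return e.errno == errno.EPERM        # EPERM: pid exists but belongs to another user

print(daemon_is_running("/var/run/hive/hive-server.pid"))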

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_server.py
new file mode 100755
index 0000000..918a871
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_server.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import os
+import mysql_users
+from resource_management import *
+
+from mysql_service import mysql_service
+from mysql_utils import mysql_configure
+
+
+class MysqlServer(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+    self.configure(env)
+
+  def clean(self, env):
+    import params
+    env.set_params(params)
+    mysql_users.mysql_deluser()
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    mysql_configure()
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    mysql_service(daemon_name=params.daemon_name, action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    mysql_service(daemon_name=params.daemon_name, action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    mysql_service(daemon_name=status_params.daemon_name, action='status')
+
+
+if __name__ == "__main__":
+  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_service.py
new file mode 100755
index 0000000..0e52a0d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_service.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def mysql_service(daemon_name=None, action='start'):
+  cnf = file('/etc/my.cnf')
+  for line in cnf:
+    if line.strip().startswith('pid-file'):
+      pid_file = line.split('=')[1].strip()
+      break
+  pid_expression = "`" + "cat " + pid_file + "`"
+  status_cmd = "ls " + pid_file + " >/dev/null 2>&1 && ps -p " + pid_expression + " >/dev/null 2>&1"
+  cmd = ('service', daemon_name, action)
+
+  if action == 'status':
+    try:
+      Execute(status_cmd)
+    except Fail:
+      raise ComponentIsNotRunning()
+  elif action == 'stop':
+    import params
+    Execute(cmd,
+            logoutput = True,
+            only_if = status_cmd,
+            sudo = True,
+    )
+  elif action == 'start':
+    import params
+    Execute(cmd,
+      logoutput = True,
+      not_if = status_cmd,
+      sudo = True,
+    )
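
mysql_service() discovers the pid file by scanning /etc/my.cnf; note that if no pid-file line exists, the loop above leaves pid_file undefined. A standalone sketch of the same parse with an explicit fallback (the fallback path is an assumption, not something the script defines):

# Sketch of the /etc/my.cnf scan in mysql_service(), with a fallback instead of
# assuming a pid-file entry is always present.
def find_mysql_pid_file(cnf_path="/etc/my.cnf", fallback="/var/run/mysqld/mysqld.pid"):
  try:
    with open(cnf_path) as cnf:
      for line in cnf:
        if line.strip().startswith("pid-file"):
          return line.split("=", 1)[1].strip()
  except IOError:
    pass
  return fallback

print(find_mysql_pid_file())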

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_users.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_users.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_users.py
new file mode 100755
index 0000000..dae5899
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_users.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+# Used to add hive access to the needed components
+def mysql_adduser():
+  import params
+
+  File(params.mysql_adduser_path,
+       mode=0755,
+       content=StaticFile('addMysqlUser.sh')
+  )
+  hive_server_host = format("{hive_server_host}")
+  hive_metastore_host = format("{hive_metastore_host}")
+
+  add_metastore_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_metastore_host}"
+  add_hiveserver_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_server_host}"
+  if (hive_server_host == hive_metastore_host):
+    cmd = format(add_hiveserver_cmd)
+  else:
+    cmd = format(add_hiveserver_cmd + ";" + add_metastore_cmd)
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          logoutput=False,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )
+
+# Removes hive access from components
+def mysql_deluser():
+  import params
+
+  File(params.mysql_deluser_path,
+       mode=0755,
+       content=StaticFile('removeMysqlUser.sh')
+  )
+  hive_server_host = format("{hive_server_host}")
+  hive_metastore_host = format("{hive_metastore_host}")
+
+  del_hiveserver_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_server_host}"
+  del_metastore_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_host}"
+  if (hive_server_host == hive_metastore_host):
+    cmd = format(del_hiveserver_cmd)
+  else:
+    cmd = format(
+      del_hiveserver_cmd + ";" + del_metastore_cmd)
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_utils.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_utils.py
new file mode 100755
index 0000000..0c1af20
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/mysql_utils.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import mysql_users
+
+def mysql_configure():
+  import params
+
+  # required for running hive
+  replace_bind_address = ('sed','-i','s|^bind-address[ \t]*=.*|bind-address = 0.0.0.0|',params.mysql_configname)
+  Execute(replace_bind_address,
+          sudo = True,
+  )
+
+  # this also will start mysql-server
+  mysql_users.mysql_adduser()
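
The sed expression in mysql_configure() forces MySQL to listen on all interfaces so the metastore database is reachable from other hosts. An equivalent rewrite in plain Python, applied to a sample my.cnf fragment:

# Python equivalent of the bind-address sed rewrite in mysql_configure().
import re

sample_my_cnf = "[mysqld]\nbind-address = 127.0.0.1\nsocket=/var/lib/mysql/mysql.sock\n"
print(re.sub(r'(?m)^bind-address[ \t]*=.*', 'bind-address = 0.0.0.0', sample_my_cnf))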

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/params.py
new file mode 100755
index 0000000..e5fe128
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/params.py
@@ -0,0 +1,418 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import status_params
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+import os
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons.os_check import OSCheck
+
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.copy_tarball import STACK_VERSION_PATTERN
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.get_port_from_url import get_port_from_url
+from resource_management.libraries import functions
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+# node hostname
+hostname = config["hostname"]
+
+# This is expected to be of the form #.#.#.#
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+stack_is_21 = False
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
+# It cannot be used during the initial Cluster Install because the version is not yet known.
+version = default("/commandParams/version", None)
+
+# current host stack version
+current_version = default("/hostLevelParams/current_version", None)
+
+# Upgrade direction
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+
+# When downgrading the 'version' and 'current_version' are both pointing to the downgrade-target version
+# downgrade_from_version provides the source-version the downgrade is happening from
+downgrade_from_version = default("/commandParams/downgrade_from_version", None)
+
+component_directory = status_params.component_directory
+hadoop_bin_dir = "/usr/bin"
+hadoop_home = '/usr'
+hive_bin = '/usr/lib/hive/bin'
+hive_lib = '/usr/lib/hive/lib'
+
+# HBase params: keep the hbase lib here; otherwise MapReduce jobs launched by Hive do not work.
+hbase_lib = '/usr/iop/current/hbase-client/lib'
+
+# Hadoop params
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_home = '/usr/iop/current/hadoop-client'
+hive_bin = format('/usr/iop/current/{component_directory}/bin')
+hive_lib = format('/usr/iop/current/{component_directory}/lib')
+hive_var_lib = '/var/lib/hive'
+
+# if this is a server action, then use the server binaries; smoke tests
+# use the client binaries
+command_role = default("/role", "")
+server_role_dir_mapping = { 'HIVE_SERVER' : 'hive-server2',
+  'HIVE_METASTORE' : 'hive-metastore' }
+
+if command_role in server_role_dir_mapping:
+  hive_server_root = server_role_dir_mapping[command_role]
+  hive_bin = format('/usr/iop/current/{hive_server_root}/bin')
+  hive_lib = format('/usr/iop/current/{hive_server_root}/lib')
+
+hive_specific_configs_supported = False
+hive_etc_dir_prefix = "/etc/hive"
+limits_conf_dir = "/etc/security/limits.d"
+hcat_conf_dir = '/etc/hive-hcatalog/conf'
+config_dir = '/etc/hive-webhcat/conf'
+hcat_lib = '/usr/iop/current/hive-webhcat/share/hcatalog'
+webhcat_bin_dir = '/usr/iop/current/hive-webhcat/sbin'
+
+# use the directories from status_params as they are already calculated for
+# the correct version of BigInsights
+hadoop_conf_dir = status_params.hadoop_conf_dir
+hadoop_bin_dir = status_params.hadoop_bin_dir
+webhcat_conf_dir = status_params.webhcat_conf_dir
+hive_conf_dir = status_params.hive_conf_dir
+hive_config_dir = status_params.hive_config_dir
+hive_client_conf_dir = status_params.hive_client_conf_dir
+hive_server_conf_dir = status_params.hive_server_conf_dir
+
+hcat_lib = '/usr/iop/current/hive-webhcat/share/hcatalog'
+webhcat_bin_dir = '/usr/iop/current/hive-webhcat/sbin'
+component_directory = status_params.component_directory
+hadoop_home = '/usr/iop/current/hadoop-client'
+hive_bin = format('/usr/iop/current/{component_directory}/bin')
+hive_lib = format('/usr/iop/current/{component_directory}/lib')
+
+# there are no client versions of these, use server versions directly
+hcat_lib = '/usr/iop/current/hive-webhcat/share/hcatalog'
+webhcat_bin_dir = '/usr/iop/current/hive-webhcat/sbin'
+
+# --- Tarballs ---
+# DON'T CHANGE THESE VARIABLE NAMES
+# Values don't change from those in copy_tarball.py
+
+hive_tar_source = "/usr/iop/{0}/hive/hive.tar.gz".format(STACK_VERSION_PATTERN)
+pig_tar_source = "/usr/iop/{0}/pig/pig.tar.gz".format(STACK_VERSION_PATTERN)
+hive_tar_dest_file = "/iop/apps/{0}/hive/hive.tar.gz".format(STACK_VERSION_PATTERN)
+pig_tar_dest_file = "/iop/apps/{0}/pig/pig.tar.gz".format(STACK_VERSION_PATTERN)
+
+hadoop_streaming_tar_source = "/usr/iop/{0}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_VERSION_PATTERN)
+sqoop_tar_source = "/usr/iop/{0}/sqoop/sqoop.tar.gz".format(STACK_VERSION_PATTERN)
+hadoop_streaming_tar_dest_dir = "/iop/apps/{0}/mapreduce/".format(STACK_VERSION_PATTERN)
+sqoop_tar_dest_dir = "/iop/apps/{0}/sqoop/".format(STACK_VERSION_PATTERN)
+
+tarballs_mode = 0444
+
+if Script.is_stack_greater_or_equal("4.1.0.0"):
+  # this is NOT a typo.  BigInsights-4.1 configs for hcatalog/webhcat point to a
+  # specific directory which is NOT called 'conf'
+  hcat_conf_dir = '/usr/iop/current/hive-webhcat/etc/hcatalog'
+  config_dir = '/usr/iop/current/hive-webhcat/etc/webhcat'
+if Script.is_stack_greater_or_equal("4.2.0.0"):
+  # needs to be set to false when downgrading from 4.2 to 4.1
+  if upgrade_direction is not None and upgrade_direction == Direction.DOWNGRADE and version is not None and compare_versions(format_stack_version(version), '4.2.0.0') < 0:
+    hive_specific_configs_supported = False
+  else:
+    # otherwise this is an upgrade to, or a fresh install of, 4.2
+    hive_specific_configs_supported = True
+
+else: # BI 4.0 / 4.1
+  # still need to use the current dir due to rolling upgrade restrictions
+  webhcat_apps_dir = "/apps/webhcat"
+
+execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
+hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
+
+hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
+hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
+#HACK Temporarily use dbType=azuredb while invoking schematool
+if hive_metastore_db_type == "mssql":
+  hive_metastore_db_type = "azuredb"
+
+#users
+hive_user = config['configurations']['hive-env']['hive_user']
+#JDBC driver jar name
+hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
+if hive_jdbc_driver == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
+  jdbc_jar_name = "sqljdbc4.jar"
+  jdbc_symlink_name = "mssql-jdbc-driver.jar"
+elif hive_jdbc_driver == "com.mysql.jdbc.Driver":
+  jdbc_jar_name = "mysql-connector-java.jar"
+  jdbc_symlink_name = "mysql-jdbc-driver.jar"
+elif hive_jdbc_driver == "org.postgresql.Driver":
+  jdbc_jar_name = "postgresql-jdbc.jar"
+  jdbc_symlink_name = "postgres-jdbc-driver.jar"
+elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+  jdbc_jar_name = "ojdbc.jar"
+  jdbc_symlink_name = "oracle-jdbc-driver.jar"
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+hive_jdbc_drivers_list = ["com.microsoft.sqlserver.jdbc.SQLServerDriver","com.mysql.jdbc.Driver","org.postgresql.Driver","oracle.jdbc.driver.OracleDriver"]
+downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
+prepackaged_ojdbc_symlink = format("{hive_lib}/ojdbc6.jar")
+templeton_port = config['configurations']['webhcat-site']['templeton.port']
+
+
+#common
+hive_metastore_hosts = config['clusterHostInfo']['hive_metastore_host']
+hive_metastore_host = hive_metastore_hosts[0]
+hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
+hive_server_hosts = config['clusterHostInfo']['hive_server_host']
+hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
+
+if hive_transport_mode.lower() == "http":
+  hive_server_port = config['configurations']['hive-site']['hive.server2.thrift.http.port']
+else:
+  hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
+
+hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
+hive_http_endpoint = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
+hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
+hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
+
+# ssl options
+hive_ssl = default('/configurations/hive-site/hive.server2.use.SSL', False)
+hive_ssl_keystore_path = default('/configurations/hive-site/hive.server2.keystore.path', None)
+hive_ssl_keystore_password = default('/configurations/hive-site/hive.server2.keystore.password', None)
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
+smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+
+fs_root = config['configurations']['core-site']['fs.defaultFS']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+
+hive_server2_keytab = config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
+
+#hive_env
+hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
+hive_pid_dir = status_params.hive_pid_dir
+hive_pid = status_params.hive_pid
+
+#Default conf dir for client
+hive_conf_dirs_list = [hive_client_conf_dir]
+
+if hostname in hive_metastore_hosts or hostname in hive_server_hosts:
+  hive_conf_dirs_list.append(hive_server_conf_dir)
+
+#hive-site
+hive_database_name = config['configurations']['hive-env']['hive_database_name']
+hive_database = config['configurations']['hive-env']['hive_database']
+
+#Starting hiveserver2
+start_hiveserver2_script = 'startHiveserver2.sh.j2'
+
+##Starting metastore
+start_metastore_script = 'startMetastore.sh'
+hive_metastore_pid = status_params.hive_metastore_pid
+java_share_dir = '/usr/share/java'
+driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+user_group = config['configurations']['cluster-env']['user_group']
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+
+target = format("{hive_lib}/{jdbc_jar_name}")
+
+jdk_location = config['hostLevelParams']['jdk_location']
+driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
+
+start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
+start_metastore_path = format("{tmp_dir}/start_metastore_script")
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+
+if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
+  hive_heapsize = config['configurations']['hive-env']['hive.heapsize']
+else:
+  hive_heapsize = config['configurations']['hive-env']['hive.client.heapsize']
+
+hive_metastore_heapsize = config['configurations']['hive-env']['hive.metastore.heapsize']
+
+java64_home = config['hostLevelParams']['java_home']
+java_version = int(config['hostLevelParams']['java_version'])
+
+##### MYSQL
+db_name = config['configurations']['hive-env']['hive_database_name']
+mysql_group = 'mysql'
+mysql_host = config['clusterHostInfo']['hive_mysql_host']
+
+mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
+mysql_deluser_path = format("{tmp_dir}/removeMysqlUser.sh")
+
+######## Metastore Schema
+init_metastore_schema = False
+if Script.is_stack_greater_or_equal("4.1.0.0"):
+  init_metastore_schema = True
+
+
+########## HCAT
+hcat_dbroot = hcat_lib
+
+hcat_user = config['configurations']['hive-env']['hcat_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
+
+hcat_pid_dir = status_params.hcat_pid_dir
+hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
+hcat_env_sh_template = config['configurations']['hcat-env']['content']
+
+#hive-log4j.properties.template
+if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
+  log4j_props = config['configurations']['hive-log4j']['content']
+else:
+  log4j_props = None
+
+#webhcat-log4j.properties.template
+if (('webhcat-log4j' in config['configurations']) and ('content' in config['configurations']['webhcat-log4j'])):
+  log4j_webhcat_props = config['configurations']['webhcat-log4j']['content']
+else:
+  log4j_webhcat_props = None
+
+#hive-exec-log4j.properties.template
+if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
+  log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
+else:
+  log4j_exec_props = None
+
+daemon_name = status_params.daemon_name
+process_name = status_params.process_name
+hive_env_sh_template = config['configurations']['hive-env']['content']
+
+hive_hdfs_user_dir = format("/user/{hive_user}")
+hive_hdfs_user_mode = 0700
+hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
+hive_exec_scratchdir = config['configurations']['hive-site']["hive.exec.scratchdir"]
+#for create_hdfs_directory
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
+
+# Tez-related properties
+tez_user = None #config['configurations']['tez-env']['tez_user']
+
+# Tez jars
+tez_local_api_jars = None #'/usr/lib/tez/tez*.jar'
+tez_local_lib_jars = None #'/usr/lib/tez/lib/*.jar'
+
+# Tez libraries
+tez_lib_uris = None #default("/configurations/tez-site/tez.lib.uris", None)
+
+if OSCheck.is_ubuntu_family():
+  mysql_configname = '/etc/mysql/my.cnf'
+else:
+  mysql_configname = '/etc/my.cnf'
+
+mysql_user = 'mysql'
+
+# Hive security
+hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
+
+mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
+hive_use_existing_db = hive_database.startswith('Existing')
+hive_exclude_packages = []
+
+# There are other packages that contain /usr/share/java/mysql-connector-java.jar (like libmysql-java),
+# trying to install mysql-connector-java upon them can cause packages to conflict.
+if hive_use_existing_db:
+  hive_exclude_packages = ['mysql-connector-java', 'mysql', 'mysql-server',
+                           'mysql-community-release', 'mysql-community-server']
+else:
+  if 'role' in config and config['role'] != "MYSQL_SERVER":
+    hive_exclude_packages = ['mysql', 'mysql-server', 'mysql-community-release',
+                             'mysql-community-server']
+  if os.path.exists(mysql_jdbc_driver_jar):
+    hive_exclude_packages.append('mysql-connector-java')
+
+
+hive_site_config = dict(config['configurations']['hive-site'])
+
+########################################################
+########### WebHCat related params #####################
+########################################################
+
+webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
+templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
+templeton_pid_dir = status_params.hcat_pid_dir
+
+webhcat_pid_file = status_params.webhcat_pid_file
+
+templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
+
+
+webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
+
+hcat_hdfs_user_dir = format("/user/{hcat_user}")
+hcat_hdfs_user_mode = 0755
+webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+webhcat_hdfs_user_mode = 0755
+#for create_hdfs_directory
+security_param = "true" if security_enabled else "false"
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create hdfs directory we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user = hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
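+# Example usage (the same pattern appears in the PIG service check later in this patch):
+#   params.HdfsResource(format("/user/{smokeuser}/pigsmoke.out"),
+#                       type="directory",
+#                       action="delete_on_execute",
+#                       owner=params.smokeuser)
+#   params.HdfsResource(None, action="execute")  # flush the queued operations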
+
+if security_enabled:
+  hive_principal = hive_server_principal.replace('_HOST',hostname.lower())

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/postgresql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/postgresql_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/postgresql_server.py
new file mode 100755
index 0000000..a1cd13f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/postgresql_server.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from postgresql_service import postgresql_service
+
+class PostgreSQLServer(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    # init the database, the ':' makes the command always return 0 in case the database has
+    # already been initialized when the postgresql server colocates with ambari server
+    Execute(format("service {postgresql_daemon_name} initdb || :"))
+
+    # update the configuration files
+    self.update_pghda_conf(env)
+    self.update_postgresql_conf(env)
+
+    # restart the postgresql server for the changes to take effect
+    self.stop(env)
+    self.start(env)
+
+    # create the database and hive_metastore_user
+    File(params.postgresql_adduser_path,
+         mode=0755,
+         content=StaticFile(format("{postgresql_adduser_file}"))
+    )
+
+    cmd = format("bash -x {postgresql_adduser_path} {postgresql_daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {db_name}")
+
+    Execute(cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    )
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+
+    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'stop')
+
+  def status(self, env):
+    import status_params
+    postgresql_service(postgresql_daemon_name=status_params.postgresql_daemon_name, action = 'status')
+
+  def update_postgresql_conf(self, env):
+    import params
+    env.set_params(params)
+
+    # change the listen_address to *
+    Execute(format("sed -i '/^[[:space:]]*listen_addresses[[:space:]]*=.*/d' {postgresql_conf_path}"))
+    Execute(format("echo \"listen_addresses = '*'\" | tee -a {postgresql_conf_path}"))
+
+    # change the standard_conforming_string to off
+    Execute(format("sed -i '/^[[:space:]]*standard_conforming_strings[[:space:]]*=.*/d' {postgresql_conf_path}"))
+    Execute(format("echo \"standard_conforming_strings = off\" | tee -a {postgresql_conf_path}"))
+
+  def update_pghda_conf(self, env):
+    import params
+    env.set_params(params)
+
+    # trust hive_metastore_user and postgres locally
+    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*all.*$/s/^/#/' {postgresql_pghba_conf_path}"))
+    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*postgres.*$/d' {postgresql_pghba_conf_path}"))
+    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*\"{hive_metastore_user_name}\".*$/d' {postgresql_pghba_conf_path}"))
+    Execute(format("echo \"local   all   postgres   trust\" | tee -a {postgresql_pghba_conf_path}"))
+    Execute(format("echo \"local   all   \\\"{hive_metastore_user_name}\\\" trust\" | tee -a {postgresql_pghba_conf_path}"))
+
+    # trust hive_metastore_user and postgres via local interface
+    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*all.*$/s/^/#/' {postgresql_pghba_conf_path}"))
+    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*postgres.*$/d' {postgresql_pghba_conf_path}"))
+    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*\"{hive_metastore_user_name}\".*$/d' {postgresql_pghba_conf_path}"))
+    Execute(format("echo \"host    all   postgres         0.0.0.0/0       trust\" | tee -a {postgresql_pghba_conf_path}"))
+    Execute(format("echo \"host    all   \\\"{hive_metastore_user_name}\\\"         0.0.0.0/0       trust\" | tee -a {postgresql_pghba_conf_path}"))
+
+if __name__ == "__main__":
+  PostgreSQLServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/postgresql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/postgresql_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/postgresql_service.py
new file mode 100755
index 0000000..6443e05
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/postgresql_service.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def postgresql_service(postgresql_daemon_name=None, action='start'):
+  status_cmd = format('service {postgresql_daemon_name} status | grep running')
+  cmd = format('service {postgresql_daemon_name} {action}')
+
+  if action == 'status':
+    Execute(status_cmd)
+  elif action == 'stop':
+    Execute(cmd,
+            logoutput = True,
+            only_if = status_cmd
+    )
+  elif action == 'start':
+    Execute(cmd,
+      logoutput = True,
+      not_if = status_cmd
+    )
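+
+# Example (as invoked from postgresql_server.py earlier in this patch):
+#   postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action='start')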

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/service_check.py
new file mode 100755
index 0000000..98584cc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/service_check.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import socket
+import sys
+import time
+from resource_management.libraries.functions.hive_check import check_thrift_port_sasl
+from hcat_service_check import hcat_service_check
+from webhcat_service_check import webhcat_service_check
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class HiveServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    if params.hive_server2_authentication == "KERBEROS" or params.hive_server2_authentication == "NONE":
+
+      address_list = params.hive_server_hosts
+
+      if not address_list:
+        raise Fail("Can not find any Hive Server host. Please check configuration.")
+
+      port = int(format("{hive_server_port}"))
+      print "Test connectivity to hive server"
+      if params.security_enabled:
+        kinitcmd=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
+      else:
+        kinitcmd=None
+
+      SOCKET_WAIT_SECONDS = 290
+
+      start_time = time.time()
+      end_time = start_time + SOCKET_WAIT_SECONDS
+
+      print "Waiting for the Hive server to start..."
+
+      workable_server_available = False
+      i = 0
+      while time.time() < end_time and not workable_server_available:
+        address = address_list[i]
+        try:
+          check_thrift_port_sasl(address, port, params.hive_server2_authentication,
+                                 params.hive_server_principal, kinitcmd, params.smokeuser,
+                                 transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
+                                 ssl=params.hive_ssl, ssl_keystore=params.hive_ssl_keystore_path,
+                                 ssl_password=params.hive_ssl_keystore_password)
+          print "Successfully connected to %s on port %s" % (address, port)
+          workable_server_available = True
+        except Exception:
+          print "Connection to %s on port %s failed" % (address, port)
+          time.sleep(5)
+
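+        # round-robin to the next HiveServer2 host until one responds or the wait times out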
+        i += 1
+        if i == len(address_list):
+          i = 0
+
+      elapsed_time = time.time() - start_time
+
+      if not workable_server_available:
+        raise Fail("Connection to Hive server %s on port %s failed after %d seconds" %
+                   (params.hostname, params.hive_server_port, elapsed_time))
+
+      print "Successfully connected to Hive at %s on port %s after %d seconds" %\
+            (params.hostname, params.hive_server_port, elapsed_time)
+
+    hcat_service_check()
+    webhcat_service_check()
+
+if __name__ == "__main__":
+  HiveServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/status_params.py
new file mode 100755
index 0000000..d0f761b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/status_params.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons import OSCheck
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+# a map of the Ambari role to the component name
+# for use with /usr/iop/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'HIVE_METASTORE' : 'hive-metastore',
+  'HIVE_SERVER' : 'hive-server2',
+  'WEBHCAT_SERVER' : 'hive-webhcat',
+  'HIVE_CLIENT' : 'hive-client',
+  'HCAT' : 'hive-client'
+}
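+# e.g. the HIVE_SERVER role resolves to /usr/iop/current/hive-server2;
+# roles not in the map fall back to the "HIVE_CLIENT" default passed below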
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HIVE_CLIENT")
+
+config = Script.get_config()
+
+hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
+hive_pid = 'hive-server.pid'
+
+hive_metastore_pid = 'hive.pid'
+
+hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir']
+webhcat_pid_file = format('{hcat_pid_dir}/webhcat.pid')
+
+process_name = 'mysqld'
+if OSCheck.is_suse_family() or OSCheck.is_ubuntu_family():
+  daemon_name = 'mysql'
+elif OSCheck.is_redhat_family() and int(OSCheck.get_os_major_version()) >= 7:
+  daemon_name = 'mariadb'
+else:
+  daemon_name = 'mysqld'
+
+# Security related/required params
+hostname = config['hostname']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+tmp_dir = Script.get_tmp_dir()
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hive_user = config['configurations']['hive-env']['hive_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
+
+# default configuration directories
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+webhcat_conf_dir = '/etc/hive-webhcat/conf'
+hive_etc_dir_prefix = "/etc/hive"
+hive_conf_dir = "/etc/hive/conf"
+hive_client_conf_dir = "/etc/hive/conf"
+
+hive_server_conf_dir = "/etc/hive/conf.server"
+
+if Script.is_stack_greater_or_equal("4.1.0.0"):
+  webhcat_conf_dir = '/usr/iop/current/hive-webhcat/conf'
+  hive_conf_dir = format("/usr/iop/current/{component_directory}/conf")
+  hive_client_conf_dir = format("/usr/iop/current/{component_directory}/conf")
+  hive_server_conf_dir = format("/usr/iop/current/{component_directory}/conf/conf.server")
+
+hive_config_dir = hive_client_conf_dir
+if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
+  hive_config_dir = hive_server_conf_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat.py
new file mode 100755
index 0000000..41fe16e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat.py
@@ -0,0 +1,117 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+import os.path
+from resource_management import *
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.version import compare_versions
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def webhcat():
+  import params
+
+  Directory(params.templeton_pid_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            create_parents=True)
+
+  Directory(params.templeton_log_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            create_parents=True)
+
+  Directory(params.config_dir,
+            create_parents=True,
+            owner=params.webhcat_user,
+            group=params.user_group)
+
+  if params.security_enabled:
+    kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
+  else:
+    kinit_if_needed = ""
+
+  if kinit_if_needed:
+    Execute(kinit_if_needed,
+            user=params.webhcat_user,
+            path='/bin'
+    )
+
+  # Replace _HOST with hostname in relevant principal-related properties
+  webhcat_site = params.config['configurations']['webhcat-site'].copy()
+  for prop_name in ['templeton.hive.properties', 'templeton.kerberos.principal']:
+    if prop_name in webhcat_site:
+      webhcat_site[prop_name] = webhcat_site[prop_name].replace("_HOST", params.hostname)
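+  # e.g. 'HTTP/_HOST@EXAMPLE.COM' becomes 'HTTP/<fqdn-of-this-host>@EXAMPLE.COM'
+  # (the principal and realm shown are illustrative)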
+
+  XmlConfig("webhcat-site.xml",
+            conf_dir=params.config_dir,
+            configurations=webhcat_site,
+            configuration_attributes=params.config['configuration_attributes']['webhcat-site'],
+            owner=params.webhcat_user,
+            group=params.user_group,
+            )
+
+  # if we're in an upgrade of a secure cluster, make sure hive-site and yarn-site are created
+  if Script.is_stack_greater_or_equal("4.1.0.0") and params.version:
+    XmlConfig("hive-site.xml",
+      conf_dir = format("/usr/iop/{version}/hive/conf"),
+      configurations = params.config['configurations']['hive-site'],
+      configuration_attributes = params.config['configuration_attributes']['hive-site'],
+      owner = params.hive_user,
+      group=params.user_group,
+      mode=0660)
+
+    XmlConfig("yarn-site.xml",
+      conf_dir = format("/usr/iop/{version}/hadoop/conf"),
+      configurations = params.config['configurations']['yarn-site'],
+      configuration_attributes = params.config['configuration_attributes']['yarn-site'],
+      owner = params.yarn_user)
+
+  File(format("{config_dir}/webhcat-env.sh"),
+       owner=params.webhcat_user,
+       group=params.user_group,
+       content=InlineTemplate(params.webhcat_env_sh_template)
+  )
+
+  Directory(params.webhcat_conf_dir,
+       cd_access='a',
+       create_parents=True
+  )
+
+  log4j_webhcat_filename = 'webhcat-log4j.properties'
+  if params.log4j_webhcat_props is not None:
+    File(format("{config_dir}/{log4j_webhcat_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.webhcat_user,
+         content=params.log4j_webhcat_props
+    )
+  elif os.path.exists(format("{config_dir}/{log4j_webhcat_filename}.template")):
+    File(format("{config_dir}/{log4j_webhcat_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.webhcat_user,
+         content=StaticFile(format("{config_dir}/{log4j_webhcat_filename}.template"))
+    )


http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params_linux.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params_linux.py
new file mode 100755
index 0000000..8280a1f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/params_linux.py
@@ -0,0 +1,88 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+# hadoop default parameters
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+
+# hadoop parameters for 2.2+
+pig_conf_dir = "/usr/iop/current/pig-client/conf"
+hadoop_home = stack_select.get_hadoop_dir("home")
+pig_bin_dir = '/usr/iop/current/pig-client/bin'
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+user_group = config['configurations']['cluster-env']['user_group']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+pig_env_sh_template = config['configurations']['pig-env']['content']
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+
+pig_properties = config['configurations']['pig-properties']['content']
+
+log4j_props = config['configurations']['pig-log4j']['content']
+
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create hdfs directory we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+ )

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/pig.py
new file mode 100755
index 0000000..ea1e205
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/pig.py
@@ -0,0 +1,61 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+from resource_management import *
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def pig():
+  import params
+
+  Directory( params.pig_conf_dir,
+    create_parents = True,
+    owner = params.hdfs_user,
+    group = params.user_group
+  )
+
+  File(format("{pig_conf_dir}/pig-env.sh"),
+    owner=params.hdfs_user,
+    mode=0755,
+    content=InlineTemplate(params.pig_env_sh_template)
+  )
+
+  # pig_properties is always set to a default even if it's not in the payload
+  File(format("{params.pig_conf_dir}/pig.properties"),
+              mode=0644,
+              group=params.user_group,
+              owner=params.hdfs_user,
+              content=params.pig_properties
+  )
+
+  if params.log4j_props is not None:
+    File(format("{params.pig_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hdfs_user,
+      content=params.log4j_props
+    )
+  elif os.path.exists(format("{params.pig_conf_dir}/log4j.properties")):
+    File(format("{params.pig_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hdfs_user
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/pig_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/pig_client.py
new file mode 100755
index 0000000..bd08b56
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/pig_client.py
@@ -0,0 +1,59 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+import os
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from pig import pig
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+class PigClient(Script):
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    pig()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class PigClientLinux(PigClient):
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "pig", params.version)
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version) # includes pig-client
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+if __name__ == "__main__":
+  PigClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/service_check.py
new file mode 100755
index 0000000..802fd74
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/PIG/package/scripts/service_check.py
@@ -0,0 +1,123 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import InlineTemplate, StaticFile
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.script.script import Script
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+class PigServiceCheck(Script):
+  pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class PigServiceCheckLinux(PigServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    input_file = format('/user/{smokeuser}/passwd')
+    output_dir = format('/user/{smokeuser}/pigsmoke.out')
+
+    # cleanup output
+    params.HdfsResource(output_dir,
+                        type="directory",
+                        action="delete_on_execute",
+                        owner=params.smokeuser,
+    )
+    # re-create the input; delete it first if it already exists
+    params.HdfsResource(input_file,
+                        type="file",
+                        source="/etc/passwd",
+                        action="create_on_execute",
+                        owner=params.smokeuser,
+    )
+    params.HdfsResource(None, action="execute")
+
+
+
+    File( format("{tmp_dir}/pigSmoke.sh"),
+      content = StaticFile("pigSmoke.sh"),
+      mode = 0755
+    )
+
+    # check for Pig-on-M/R
+    Execute( format("pig {tmp_dir}/pigSmoke.sh"),
+      tries     = 3,
+      try_sleep = 5,
+      path      = format('{pig_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
+      user      = params.smokeuser,
+      logoutput = True
+    )
+
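+    # verify that the Pig job actually produced its output directory in HDFS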
+    test_cmd = format("fs -test -e {output_dir}")
+    ExecuteHadoop( test_cmd,
+      user      = params.smokeuser,
+      conf_dir = params.hadoop_conf_dir,
+      bin_dir = params.hadoop_bin_dir
+    )
+
+    if params.iop_stack_version != "" and compare_versions(params.iop_stack_version, '4.0') >= 0:
+      # clean up output from the previous run
+      params.HdfsResource(output_dir,
+                          type="directory",
+                          action="delete_on_execute",
+                          owner=params.smokeuser,
+      )
+      # re-create the input; delete it first if it already exists
+      params.HdfsResource(input_file,
+                          type="file",
+                          source="/etc/passwd",
+                          action="create_on_execute",
+                          owner=params.smokeuser,
+      )
+      params.HdfsResource(None, action="execute")
+
+      if params.security_enabled:
+        kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+        Execute(kinit_cmd,
+                user=params.smokeuser
+        )
+
+      Execute(format("pig {tmp_dir}/pigSmoke.sh"),
+        tries     = 3,
+        try_sleep = 5,
+        path      = format('{pig_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
+        user      = params.smokeuser,
+        logoutput = True
+      )
+
+      ExecuteHadoop(test_cmd,
+        user      = params.smokeuser,
+        conf_dir = params.hadoop_conf_dir,
+        bin_dir = params.hadoop_bin_dir
+      )
+
+if __name__ == "__main__":
+  PigServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/configuration/slider-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/configuration/slider-client.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/configuration/slider-client.xml
new file mode 100755
index 0000000..fdeceae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/configuration/slider-client.xml
@@ -0,0 +1,60 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+
+  <!--
+    <property>
+      <name>slider.security.protocol.acl</name>
+      <value>*</value>
+      <description>When security is enabled, set appropriate acl. Default value means allow everyone.</description>
+    </property>
+    -->
+
+  <!--
+     The recommended approach is to configure slider-env.sh and set HADOOP_CONF_DIR.
+     Otherwise, appropriate configurations from hdfs-site, yarn-site, can be dropped in this file
+     for Slider client to work. The following list is not an exhaustive list but the minimal config
+     needed to interact with a non-secure cluster.
+  -->
+
+  <!--
+      <property>
+        <name>yarn.resourcemanager.address</name>
+        <value>localhost:8050</value>
+        <description>The address of the applications manager interface in the RM.</description>
+      </property>
+
+      <property>
+        <name>yarn.resourcemanager.scheduler.address</name>
+        <value>localhost:8030</value>
+        <description>The address of the scheduler interface.</description>
+      </property>
+
+      <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://localhost:8020</value>
+        <description>The name of the default file system.  Either the
+          literal string "local" or a host:port for NDFS.</description>
+      </property>
+    -->
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/configuration/slider-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/configuration/slider-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/configuration/slider-env.xml
new file mode 100755
index 0000000..d5bab25
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/configuration/slider-env.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- slider-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for slider-env.sh file</description>
+    <value>
+# Set Slider-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java64_home}}
+# The hadoop conf directory.  Optional as slider-client.xml can be edited to add properties.
+export HADOOP_CONF_DIR={{hadoop_conf_dir}}
+    </value>
+  </property>
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/configuration/slider-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/configuration/slider-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/configuration/slider-log4j.xml
new file mode 100755
index 0000000..709867c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/configuration/slider-log4j.xml
@@ -0,0 +1,89 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Define some default values that can be overridden by system properties
+log4j.rootLogger=INFO,stdout
+log4j.threshhold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+
+# log layout skips stack-trace creation operations by avoiding line numbers and method
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n
+
+# debug edition is much more expensive
+#log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
+log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
+log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n
+#log4j.logger.org.apache.slider.yarn.appmaster.SliderAppMasterer.master=INFO,subprocess
+
+# for debugging Slider
+#log4j.logger.org.apache.slider=DEBUG
+#log4j.logger.org.apache.slider=DEBUG
+
+# uncomment to debug service lifecycle issues
+#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG
+#log4j.logger.org.apache.hadoop.yarn.service=DEBUG
+
+# uncomment for YARN operations
+#log4j.logger.org.apache.hadoop.yarn.client=DEBUG
+
+# uncomment this to debug security problems
+#log4j.logger.org.apache.hadoop.security=DEBUG
+
+#crank back on some noise
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.hadoop.hdfs=WARN
+
+
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN
+log4j.logger.org.apache.zookeeper=WARN
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/metainfo.xml
new file mode 100755
index 0000000..6e1addf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/metainfo.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SLIDER</name>
+      <displayName>Slider</displayName>
+      <comment>A framework for deploying, managing and monitoring existing distributed applications on YARN</comment>
+      <version>0.80.0</version>
+      <components>
+        <component>
+          <name>SLIDER</name>
+          <displayName>Slider</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/slider_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>slider-client.xml</fileName>
+              <dictionaryName>slider-client</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>hdfs-site.xml</fileName>
+              <dictionaryName>hdfs-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>yarn-site.xml</fileName>
+              <dictionaryName>yarn-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>slider-env.sh</fileName>
+              <dictionaryName>slider-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>slider-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>slider*</name>
+            </package>
+            <!--package>
+              <name>storm_2_2_*</name>
+            </package-->
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>slider*</name>
+            </package>
+            <!--
+            <package>
+              <name>storm-2-2-.*</name>
+            </package>
+            -->
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>YARN</service>
+        <service>HDFS</service>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>slider-log4j</config-type>
+        <config-type>slider-client</config-type>
+        <config-type>slider-env</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/files/hbaseSmokeVerify.sh
new file mode 100755
index 0000000..19276f3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
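+# Usage: hbaseSmokeVerify.sh <conf_dir> <expected_data> <hbase_cmd>
+# Scans the ambarismoketest table and verifies the smoke-test row is present.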
+conf_dir=$1
+data=$2
+hbase_cmd=$3
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+awk '/id/,/date/' /tmp/hbase_chk_verify
+# awk exits 0 even when nothing matches, so check for the expected data explicitly
+if ! grep -q "$data" /tmp/hbase_chk_verify
+then
+  exit 1
+fi
+
+grep -q '1 row(s)' /tmp/hbase_chk_verify

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/params.py
new file mode 100755
index 0000000..ff12274
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/params.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.libraries.functions.default import default
+from resource_management.libraries import functions
+from resource_management import *
+# server configurations
+config = Script.get_config()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+slider_bin_dir = '/usr/iop/current/slider-client/bin'
+
+#hadoop params
+
+slider_conf_dir = "/usr/iop/current/slider-client/conf"
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path()
+slider_env_sh_template = config['configurations']['slider-env']['content']
+
+java64_home = config['hostLevelParams']['java_home']
+log4j_props = config['configurations']['slider-log4j']['content']
+slider_cmd = format("{slider_bin_dir}/slider")
+#storm_slider_conf_dir= '/usr/iop/current/storm-slider-client/conf'
+slider_home_dir= '/usr/iop/current/slider-client'

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/service_check.py
new file mode 100755
index 0000000..9c45d1c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/service_check.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class SliderServiceCheck(Script):
+
+  def service_check(self, env):
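+    # Verifies the Slider client by running `slider list` as the smoke user,
+    # preceded by a kinit when Kerberos security is enabled.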
+    import params
+    env.set_params(params)
+    smokeuser_kinit_cmd = format(
+      "{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal};") if params.security_enabled else ""
+
+    servicecheckcmd = format("{smokeuser_kinit_cmd} {slider_cmd} list")
+
+    Execute(servicecheckcmd,
+            tries=3,
+            try_sleep=5,
+            user=params.smokeuser,
+            logoutput=True
+    )
+
+
+if __name__ == "__main__":
+  SliderServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/slider.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/slider.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/slider.py
new file mode 100755
index 0000000..076e451
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/slider.py
@@ -0,0 +1,60 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+from resource_management import *
+
+def slider():
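+  # Lays down the Slider client configuration: slider-client.xml, slider-env.sh
+  # and, when provided, a custom log4j.properties in the Slider conf directory.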
+  import params
+
+  Directory(params.slider_conf_dir,
+            create_parents=True
+  )
+
+  if 'configurations' in params.config and 'slider-client' in params.config['configurations']:
+    slider_client_config = params.config['configurations']['slider-client']
+  else:
+    slider_client_config = {}
+
+  XmlConfig("slider-client.xml",
+            conf_dir=params.slider_conf_dir,
+            configurations=slider_client_config
+  )
+
+  File(format("{slider_conf_dir}/slider-env.sh"),
+       mode=0755,
+       content=InlineTemplate(params.slider_env_sh_template)
+  )
+  """
+  Directory(params.storm_slider_conf_dir,
+            create_parents=True
+  )
+
+  File(format("{storm_slider_conf_dir}/storm-slider-env.sh"),
+       mode=0755,
+       content=Template('storm-slider-env.sh.j2')
+  )
+  """
+  if params.log4j_props is not None:
+    File(format("{params.slider_conf_dir}/log4j.properties"),
+         mode=0644,
+         content=params.log4j_props
+    )
+  elif os.path.exists(format("{params.slider_conf_dir}/log4j.properties")):
+    File(format("{params.slider_conf_dir}/log4j.properties"),
+         mode=0644
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/slider_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/slider_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/slider_client.py
new file mode 100755
index 0000000..11e3cd3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/scripts/slider_client.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from slider import slider
+
+
+class SliderClient(Script):
+
+  def get_component_name(self):
+    return "slider-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "slider", params.version)
+      stack_select.select("slider-client", params.version)
+      #Execute(format("stack-select set slider-client {version}"))
+
+      # also set all of the hadoop clients since slider client is upgraded as
+      # part of the final "CLIENTS" group and we need to ensure that
+      # hadoop-client is also set
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+      #Execute(format("stack-select set hadoop-client {version}"))
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    slider()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  SliderClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/templates/storm-slider-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/templates/storm-slider-env.sh.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/templates/storm-slider-env.sh.j2
new file mode 100755
index 0000000..8022a4b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SLIDER/package/templates/storm-slider-env.sh.j2
@@ -0,0 +1,38 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+#!/usr/bin/env bash
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
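+# The values below are rendered by Ambari from the cluster configuration.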
+export JAVA_HOME={{java64_home}}
+export SLIDER_HOME={{slider_home_dir}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-env.xml
new file mode 100755
index 0000000..03e88c9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-env.xml
@@ -0,0 +1,208 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+   <property>
+    <name>solr_user</name>
+    <description>User to run Solr as</description>
+    <property-type>USER</property-type>
+    <value>solr</value>
+  </property>
+
+  <property>
+    <name>solr_lib_dir</name>
+    <value>/var/lib/solr</value>
+    <description>Directory for writable Solr files and index data</description>
+  </property>
+
+  <property>
+    <name>solr_pid_dir</name>
+    <value>/var/run/solr</value>
+  </property>
+
+  <property>
+    <name>solr_log_dir</name>
+    <value>/var/log/solr</value>
+  </property>
+
+  <property>
+    <name>solr_port</name>
+    <value>8983</value>
+    <description>Sets the port Solr binds to, default is 8983</description>
+  </property>
+
+  <property>
+    <name>solr_hdfs_home_dir</name>
+    <value>/apps/solr/data</value>
+    <description>A root location in HDFS for Solr to write collection data to. Rather than specifying an HDFS location for the data directory or update log directory, use this to specify one root location and have everything automatically created within this HDFS location.</description>
+  </property>
+
+
+  <property>
+    <name>ZOOKEEPER_CHROOT</name>
+    <value>/solr</value>
+    <description>If you're using a ZooKeeper instance that is shared by other systems, it's recommended to isolate the SolrCloud znode tree using ZooKeeper's chroot support.
+    For instance, to ensure all znodes created by SolrCloud are stored under /solr, you can put /solr on the end of your ZK_HOST connection string, such as: ZK_HOST=zk1,zk2,zk3/solr</description>
+  </property>
+
+ <property>
+    <name>content</name>
+    <description>This is the jinja template for solr.in.sh file</description>
+    <value>
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+export JAVA_HOME={{java64_home}}
+
+# By default the script will use JAVA_HOME to determine which java
+# to use, but you can set a specific path for Solr to use without
+# affecting other Java applications on your server/workstation.
+#SOLR_JAVA_HOME=""
+
+
+
+# Increase Java Min/Max Heap as needed to support your indexing / query needs
+SOLR_JAVA_MEM="-Xms512m -Xmx512m"
+
+
+# Enable verbose GC logging
+GC_LOG_OPTS="-verbose:gc -XX:+PrintHeapAtGC -XX:+PrintGCDetails \
+-XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime"
+
+
+# These GC settings have shown to work well for a number of common Solr workloads
+GC_TUNE="-XX:NewRatio=3 \
+-XX:SurvivorRatio=4 \
+-XX:TargetSurvivorRatio=90 \
+-XX:MaxTenuringThreshold=8 \
+-XX:+UseConcMarkSweepGC \
+-XX:+UseParNewGC \
+-XX:ConcGCThreads=4 -XX:ParallelGCThreads=4 \
+-XX:+CMSScavengeBeforeRemark \
+-XX:PretenureSizeThreshold=64m \
+-XX:+UseCMSInitiatingOccupancyOnly \
+-XX:CMSInitiatingOccupancyFraction=50 \
+-XX:CMSMaxAbortablePrecleanTime=6000 \
+-XX:+CMSParallelRemarkEnabled \
+-XX:+ParallelRefProcEnabled \
+-XX:MaxDirectMemorySize=20g"
+
+
+# Set the ZooKeeper connection string if using an external ZooKeeper ensemble
+# e.g. host1:2181,host2:2181/chroot
+# Leave empty if not using SolrCloud
+#ZK_HOST=""
+
+
+
+# Set the ZooKeeper client timeout (for SolrCloud mode)
+#ZK_CLIENT_TIMEOUT="15000"
+
+
+# By default the start script uses "localhost"; override the hostname here
+# for production SolrCloud environments to control the hostname exposed to cluster state
+#SOLR_HOST="192.168.1.1"
+
+# By default the start script uses UTC; override the timezone if needed
+#SOLR_TIMEZONE="UTC"
+
+# Set to true to activate the JMX RMI connector to allow remote JMX client applications
+# to monitor the JVM hosting Solr; set to "false" to disable that behavior
+# (false is recommended in production environments)
+ENABLE_REMOTE_JMX_OPTS="false"
+
+
+# The script will use SOLR_PORT+10000 for the RMI_PORT or you can set it here
+# RMI_PORT=18983
+
+
+# Anything you add to the SOLR_OPTS variable will be included in the java
+# start command line as-is, in ADDITION to other options. If you specify the
+# -a option on start script, those options will be appended as well. Examples:
+#SOLR_OPTS="$SOLR_OPTS -Dsolr.autoSoftCommit.maxTime=3000"
+#SOLR_OPTS="$SOLR_OPTS -Dsolr.autoCommit.maxTime=60000"
+#SOLR_OPTS="$SOLR_OPTS -Dsolr.clustering.enabled=true"
+
+
+
+# Location where the bin/solr script will save PID files for running instances
+# If not set, the script will create PID files in $SOLR_TIP/bin
+#SOLR_PID_DIR=
+
+
+
+# Path to a directory where Solr creates index files; the specified directory
+# must contain a solr.xml. By default, Solr will use server/solr
+#SOLR_HOME=
+
+
+
+# Solr provides a default Log4J configuration properties file in server/resources
+# however, you may want to customize the log settings and file appender location
+# so you can point the script to use a different log4j.properties file
+#LOG4J_PROPS=/var/solr/log4j.properties
+
+
+
+# Location where Solr should write logs to; should agree with the file appender
+# settings in server/resources/log4j.properties
+#SOLR_LOGS_DIR=
+
+
+
+# Sets the port Solr binds to, default is 8983
+#SOLR_PORT=8983
+
+
+
+# Uncomment to set SSL-related system properties
+# Be sure to update the paths to the correct keystore for your environment
+#SOLR_SSL_OPTS="-Djavax.net.ssl.keyStore=etc/solr-ssl.keystore.jks \
+#-Djavax.net.ssl.keyStorePassword=secret \
+#-Djavax.net.ssl.trustStore=etc/solr-ssl.keystore.jks \
+#-Djavax.net.ssl.trustStorePassword=secret"
+
+
+
+# Uncomment to set a specific SSL port (-Djetty.ssl.port=N); if not set
+# and you are using SSL, then the start script will use SOLR_PORT for the SSL port
+#SOLR_SSL_PORT=
+
+
+SOLR_PID_DIR={{pid_dir}}
+SOLR_HOME={{lib_dir}}/data
+LOG4J_PROPS={{solr_conf_dir}}/log4j.properties
+SOLR_LOGS_DIR={{log_dir}}
+SOLR_PORT={{solr_port}}
+SOLR_MODE=solrcloud
+ZK_HOST={{zookeeper_hosts_list}}{{zookeeper_chroot}}
+SOLR_HOST={{hostname}}
+
+# Comment out the following SOLR_OPTS setting to configure Solr to write its index and
+# transaction log files to the local filesystem instead of HDFS. Data that already exists
+# on HDFS is not moved, so it will no longer be available to Solr after this change.
+SOLR_OPTS="-Dsolr.directoryFactory=HdfsDirectoryFactory \
+-Dsolr.lock.type=hdfs \
+-Dsolr.hdfs.confdir=/etc/hadoop/conf \
+-Dsolr.hdfs.home={{fs_root}}{{solr_hdfs_home_dir}} \
+-Dsolr.hdfs.security.kerberos.enabled={{sole_kerberos_enabled}} \
+-Dsolr.hdfs.security.kerberos.keytabfile={{solr_keytab}} \
+-Dsolr.hdfs.security.kerberos.principal={{solr_principal}}"
+
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-log4j.xml
new file mode 100755
index 0000000..abbf9d2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-log4j.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+#
+# Solr Logging Configuration
+#
+
+#  Log directory and root logging level
+solr.log=${solr.solr.home}/../logs
+log4j.rootLogger=INFO, file, CONSOLE
+
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%-4r [%t] %-5p %c %x [%X{collection} %X{shard} %X{replica} %X{core}] \u2013 %m%n
+
+#- size rotation with log cleanup.
+log4j.appender.file=org.apache.log4j.RollingFileAppender
+log4j.appender.file.MaxFileSize=4MB
+log4j.appender.file.MaxBackupIndex=9
+
+#- File to log to and log format
+log4j.appender.file.File=${solr.log}/solr.log
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.layout.ConversionPattern=%-5p - %d{yyyy-MM-dd HH:mm:ss.SSS}; [%X{collection} %X{shard} %X{replica} %X{core}] %C; %m\n
+
+log4j.logger.org.apache.zookeeper=WARN
+log4j.logger.org.apache.hadoop=WARN
+
+# set to INFO to enable infostream log messages
+log4j.logger.org.apache.solr.update.LoggingInfoStream=OFF
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-site.xml
new file mode 100755
index 0000000..e36e633
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/configuration/solr-site.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>solr.hdfs.security.kerberos.enabled</name>
+    <value>false</value>
+    <description>Set to true to enable Kerberos authentication</description>
+  </property>
+
+  <property>
+    <name>solr.hdfs.security.kerberos.keytabfile</name>
+    <value>/etc/security/keytabs/solr.service.keytab</value>
+    <description>A keytab file contains pairs of Kerberos principals and encrypted keys which allows for password-less authentication when Solr attempts to authenticate with secure Hadoop.
+    This file will need to be present on all Solr servers at the same path provided in this parameter.
+    </description>
+  </property>
+
+  <property>
+    <name>solr.hdfs.security.kerberos.principal</name>
+    <value>solr/_HOST@EXAMPLE.COM</value>
+    <description>The Kerberos principal that Solr should use to authenticate to secure Hadoop; the format of a typical Kerberos V5 principal is: primary/instance@realm
+    </description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/kerberos.json
new file mode 100755
index 0000000..cd46977
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/kerberos.json
@@ -0,0 +1,47 @@
+{
+  "services": [
+    {
+      "name": "SOLR",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "solr-site": {
+              "solr.hdfs.security.kerberos.enabled":"true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "SOLR",
+          "identities": [
+            {
+              "name": "solr",
+              "principal": {
+                "value": "${solr-env/solr_user}/_HOST@${realm}",
+                "type": "service",
+                "configuration": "solr-site/solr.hdfs.security.kerberos.principal",
+                "local_username": "${solr-env/solr_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/solr.service.keytab",
+                "owner": {
+                  "name": "${solr-env/solr_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "solr-site/solr.hdfs.security.kerberos.keytabfile"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/metainfo.xml
new file mode 100755
index 0000000..f472263
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/metainfo.xml
@@ -0,0 +1,74 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SOLR</name>
+      <displayName>Solr</displayName>
+      <comment>Solr is the popular, blazing fast open source enterprise search platform from the Apache Lucene project
+      </comment>
+      <version>5.1.0</version>
+
+      <components>
+        <component>
+          <name>SOLR</name>
+          <displayName>Solr</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/solr_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>solr-site.xml</fileName>
+              <dictionaryName>solr-site</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>solr_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>solr-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/params.py
new file mode 100755
index 0000000..d5d90f6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/params.py
@@ -0,0 +1,182 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from resource_management.libraries import Script
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.resources import HdfsResource
+import status_params
+
+# server configurations
+config = Script.get_config()
+stack_name = default("/hostLevelParams/stack_name", None)
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_hosts.sort()
+zookeeper_hosts_list=','.join(zookeeper_hosts)
+
+java64_home = config['hostLevelParams']['java_home']
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
+# Version being upgraded/downgraded to
+# It cannot be used during the initial cluster install because the version is not yet known.
+version = default("/commandParams/version", None)
+
+# current host stack version
+current_version = default("/hostLevelParams/current_version", None)
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+# Upgrade direction
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+
+solr_user=config['configurations']['solr-env']['solr_user']
+user_group=config['configurations']['cluster-env']['user_group']
+hostname = config['hostname']
+solr_server_hosts = config['clusterHostInfo']['solr_hosts']
+solr_server_host = solr_server_hosts[0]
+
+fs_root = config['configurations']['core-site']['fs.defaultFS']
+
+solr_home = '/usr/iop/current/solr-server'
+solr_conf_dir='/usr/iop/current/solr-server/conf'
+solr_conf = solr_conf_dir
+
+solr_piddir = status_params.solr_pid_dir
+solr_pidfile = status_params.solr_pid_file
+
+if "solr-env" in config['configurations']:
+  #solr_hosts = config['clusterHostInfo']['solr_hosts']
+  solr_znode = default('/configurations/solr-env/solr_znode', '/solr')
+  solr_min_mem = default('/configurations/solr-env/solr_minmem', 1024)
+  solr_max_mem = default('/configurations/solr-env/solr_maxmem', 2048)
+  #solr_instance_count = len(config['clusterHostInfo']['solr_hosts'])
+  solr_datadir = default('/configurations/solr-env/solr_datadir', '/opt/solr/data')
+  #solr_data_resources_dir = os.path.join(solr_datadir, 'resources')
+  solr_jmx_port = default('/configurations/solr-env/solr_jmx_port', 18983)
+  solr_ssl_enabled = default('/configurations/solr-env/solr_ssl_enabled', False)
+  solr_keystore_location = default('/configurations/solr-env/solr_keystore_location', '/etc/security/serverKeys/solr.keyStore.jks')
+  solr_keystore_password = default('/configurations/solr-env/solr_keystore_password', 'bigdata')
+  solr_keystore_type = default('/configurations/solr-env/solr_keystore_type', 'jks')
+  #solr_truststore_location = config['configurations']['solr-env']['solr_truststore_location']
+  #solr_truststore_password = config['configurations']['solr-env']['solr_truststore_password']
+  #solr_truststore_type = config['configurations']['solr-env']['solr_truststore_type']
+  #solr_user = config['configurations']['solr-env']['solr_user']
+  solr_log_dir = config['configurations']['solr-env']['solr_log_dir']
+  solr_log = format("{solr_log_dir}/solr-install.log")
+  #solr_env_content = config['configurations']['solr-env']['content']
+  solr_hdfs_home_dir = config['configurations']['solr-env']['solr_hdfs_home_dir']
+
+zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
+# get comma separated list of zookeeper hosts from clusterHostInfo
+zookeeper_quorum = ",".join(host + ":" + str(zookeeper_port)
+                            for host in config['clusterHostInfo']['zookeeper_hosts'])
+
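+# Pick the Solr data/lib directories for this stack version and upgrade direction:
+# IOP 4.1 kept data under solr_lib_dir, while 4.2+ uses solr_data_dir.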
+if compare_versions(format_stack_version(current_version), '4.2.0.0') >= 0:
+  if upgrade_direction is not None and upgrade_direction == Direction.DOWNGRADE and version is not None and compare_versions(format_stack_version(version), '4.2.0.0') < 0:
+    lib_dir=default("/configurations/solr-env/solr_lib_dir", None)
+  else:
+    lib_dir=default("/configurations/solr-env/solr_data_dir", None)
+    solr_data_dir=default("/configurations/solr-env/solr_data_dir", None)
+else: #IOP 4.1
+  if upgrade_direction is not None and upgrade_direction == Direction.UPGRADE:
+    solr_data_dir=default("/configurations/solr-env/solr_data_dir", None)
+    if not solr_data_dir:
+      solr_data_dir=default("/configurations/solr-env/solr_datadir", None)
+    lib_dir=default("/configurations/solr-env/solr_data_dir", None)
+    if not lib_dir:
+      lib_dir=default("/configurations/solr-env/solr_datadir", None)
+    old_lib_dir=default("/configurations/solr-env/solr_lib_dir", None)
+  else:
+    solr_data_dir=default("/configurations/solr-env/solr_lib_dir", None)
+    lib_dir=default("/configurations/solr-env/solr_lib_dir", None)
+log_dir=config['configurations']['solr-env']['solr_log_dir']
+pid_dir=config['configurations']['solr-env']['solr_pid_dir']
+solr_port=config['configurations']['solr-env']['solr_port']
+
+zookeeper_chroot=config['configurations']['solr-env']['ZOOKEEPER_CHROOT']
+
+solr_site = dict(config['configurations']['solr-site'])
+solr_principal = solr_site['solr.hdfs.security.kerberos.principal']
+
+if security_enabled:
+  solr_principal = solr_principal.replace('_HOST',hostname)
+  solr_site['solr.hdfs.security.kerberos.principal']=solr_principal
+
+#kerberos
+sole_kerberos_enabled=config['configurations']['solr-site']['solr.hdfs.security.kerberos.enabled']
+solr_keytab=config['configurations']['solr-site']['solr.hdfs.security.kerberos.keytabfile']
+
+#log4j.properties
+log4j_props = config['configurations']['solr-log4j']['content']
+
+solr_in_sh_template = config['configurations']['solr-env']['content']
+
+solr_pid_file = status_params.solr_pid_file
+
+solr_hdfs_home_dir = config['configurations']['solr-env']['solr_hdfs_home_dir']
+solr_hdfs_user_mode = 0775
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  solr_jaas_file = solr_conf + '/solr_jaas.conf'
+  solr_kerberos_keytab = solr_keytab
+  solr_kerberos_principal = solr_principal #cannot use the one from solr-env, otherwise default @EXAMPLE.COM is in the real value
+  solr_web_kerberos_keytab = default('/configurations/solr-env/solr_web_kerberos_keytab', None)
+  solr_web_kerberos_principal = default('/configurations/solr-env/solr_web_kerberos_principal', None)
+  if solr_web_kerberos_principal:
+    solr_web_kerberos_principal = solr_web_kerberos_principal.replace('_HOST',_hostname_lowercase)
+  solr_kerberos_name_rules = default('/configurations/solr-env/solr_kerberos_name_rules', 'DEFAULT')
+
+kinit_path_local = get_kinit_path()
+import functools
+# create a partial function with common arguments for every HdfsResource call;
+# to create an HDFS directory, call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/service_check.py
new file mode 100755
index 0000000..d3add2f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/service_check.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management import *
+from resource_management.libraries.functions.validate import call_and_match_output
+import subprocess
+import time
+
+class SolrServiceCheck(Script):
+  def service_check(self, env):
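+    # Smoke test: create a test collection, verify it is listed through the
+    # collections API, then delete it again (kinit first on secure clusters).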
+    import params
+    env.set_params(params)
+
+    url = "http://" + params.solr_server_host + ":" + str(params.solr_port) + "/solr/"
+
+    if params.security_enabled:
+        kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+        Execute(kinit_cmd,
+                user = params.smokeuser,
+                logoutput = True
+        )
+
+    create_collection_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh solr create -c smokeuser_ExampleCollection -s 2 -d data_driven_schema_configs")
+    Logger.info("Creating solr collection from example: %s" % create_collection_cmd)
+    Execute(create_collection_cmd,
+            user = params.smokeuser,
+            logoutput = True)
+
+    list_collection_cmd = "su " + params.smokeuser + " -c 'curl -s --negotiate -u : " + url + "admin/collections?action=list'"
+    list_collection_output = "<str>smokeuser_ExampleCollection</str>"
+    Logger.info("List Collections: %s" % list_collection_cmd)
+    call_and_match_output(list_collection_cmd, format("({list_collection_output})"), "Failed to find collection \"smokeuser_ExampleCollection\" in the collections list")
+
+    delete_collection_cmd = format("SOLR_INCLUDE={solr_conf_dir}/solr.in.sh solr delete -c smokeuser_ExampleCollection")
+    Logger.info("Deleting solr collection : %s" % delete_collection_cmd)
+    Execute(delete_collection_cmd,
+      user = params.smokeuser,
+      logoutput=True
+    )
+
+if __name__ == "__main__":
+  SolrServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr.py
new file mode 100755
index 0000000..ae4a101
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+import sys
+import os
+
+def solr(type = None, upgrade_type=None):
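+  # type == 'server' lays down the Solr server configuration and HDFS home
+  # directory; type == '4103' migrates an IOP 4.1.0.0 conf directory to a
+  # symlinked /etc layout.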
+  import params
+
+  if type == 'server':
+    effective_version = params.iop_stack_version if upgrade_type is None else format_stack_version(params.version)
+
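+    # Ensure the Solr home directory exists in HDFS before the server starts
+    # writing index data there.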
+    params.HdfsResource(params.solr_hdfs_home_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.solr_user,
+                         mode=params.solr_hdfs_user_mode
+    )
+    params.HdfsResource(None, action="execute")
+
+    Directory([params.log_dir,params.pid_dir,params.solr_conf_dir],
+              mode=0755,
+              cd_access='a',
+              owner=params.solr_user,
+              create_parents=True,
+              group=params.user_group
+      )
+
+    XmlConfig("solr-site.xml",
+              conf_dir=params.solr_conf_dir,
+              configurations=params.solr_site,
+              configuration_attributes=params.config['configuration_attributes']['solr-site'],
+              owner=params.solr_user,
+              group=params.user_group,
+              mode=0644
+    )
+
+    File(format("{solr_conf_dir}/solr.in.sh"),
+         content=InlineTemplate(params.solr_in_sh_template),
+         owner=params.solr_user,
+         group=params.user_group
+    )
+
+    File(format("{solr_conf_dir}/log4j.properties"),
+           mode=0644,
+           group=params.user_group,
+           owner=params.solr_user,
+           content=params.log4j_props
+    )
+
+    Directory(params.lib_dir,
+              mode=0755,
+              cd_access='a',
+              owner=params.solr_user,
+              create_parents=True,
+              group=params.user_group
+    )
+
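+    # From IOP 4.2.0.0 onward solr.xml is rendered directly under lib_dir;
+    # earlier releases keep it in lib_dir/data instead.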
+    if effective_version is not None and effective_version != "" and compare_versions(effective_version, '4.2.0.0') >= 0:
+      File(format("{lib_dir}/solr.xml"),
+              mode=0644,
+              group=params.user_group,
+              owner=params.solr_user,
+              content=Template("solr.xml.j2")
+      )
+    else:
+      Directory(format("{lib_dir}/data"),
+              owner=params.solr_user,
+              create_parents=True,
+              group=params.user_group
+      )
+
+      File(format("{lib_dir}/data/solr.xml"),
+              mode=0644,
+              group=params.user_group,
+              owner=params.solr_user,
+              content=Template("solr.xml.j2")
+      )
+
+    # solr-webapp is a temp dir and must be owned by solr so it can write temp files into it.
+    Directory(format("{solr_home}/server/solr-webapp"),
+              owner=params.solr_user,
+              create_parents=True,
+    )
+
+    if params.security_enabled:
+      File(format("{solr_jaas_file}"),
+           content=Template("solr_jaas.conf.j2"),
+           owner=params.solr_user)
+
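+  # The '4103' type (presumably the IOP 4.1.0.3 upgrade path) copies the existing
+  # Solr 4.1.0.0 config (or the backup under /etc/solr/conf.backup) into the
+  # versioned /etc/solr/4.1.0.0/0 directory and relinks /usr/iop/4.1.0.0/solr/conf to it.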
+  elif type == '4103':
+    solr41_conf_dir = "/usr/iop/4.1.0.0/solr/conf"
+    solr41_etc_dir="/etc/solr/4.1.0.0/0"
+    if not os.path.exists(solr41_etc_dir):
+      Execute("mkdir -p /etc/solr/4.1.0.0/0")
+
+    content_path=solr41_conf_dir
+    if not os.path.isfile("/usr/iop/4.1.0.0/solr/conf/solr.in.sh"):
+      content_path = "/etc/solr/conf.backup"
+
+    for each in os.listdir(content_path):
+      File(os.path.join(solr41_etc_dir, each),
+           owner=params.solr_user,
+           content = StaticFile(os.path.join(content_path,each)))
+
+    if not os.path.islink(solr41_conf_dir):
+      Directory(solr41_conf_dir,
+                action="delete",
+                create_parents=True)
+
+    if os.path.islink(solr41_conf_dir):
+      os.unlink(solr41_conf_dir)
+
+    if not os.path.islink(solr41_conf_dir):
+      Link(solr41_conf_dir,
+           to=solr41_etc_dir
+      )

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_client.py
new file mode 100755
index 0000000..1969c70
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SOLR/package/scripts/solr_client.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+class SolrClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+
+
+  def configure(self, env):
+    print 'Configure the solr client'
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  SolrClient().execute()


[08/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_23.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_23.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_23.py
new file mode 100755
index 0000000..d8b6729
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_23.py
@@ -0,0 +1,995 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python Imports
+import os
+import re
+import fnmatch
+import math
+import socket
+
+# Local Imports
+from resource_management.core.logger import Logger
+
+try:
+  from stack_advisor_22 import *
+except ImportError:
+  # Tolerate a missing stack_advisor_22 and just report it
+  print("stack_advisor_22 not found")
+
+DB_TYPE_DEFAULT_PORT_MAP = {"MYSQL":"3306", "ORACLE":"1521", "POSTGRES":"5432", "MSSQL":"1433", "SQLA":"2638"}
+
+class HDP23StackAdvisor(HDP22StackAdvisor):
+
+  def __init__(self):
+    super(HDP23StackAdvisor, self).__init__()
+    Logger.initialize_logger()
+
+  def getComponentLayoutValidations(self, services, hosts):
+    parentItems = super(HDP23StackAdvisor, self).getComponentLayoutValidations(services, hosts)
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item["StackServiceComponents"] for sublist in componentsListList for item in sublist]
+    childItems = []
+
+    if "SPARK" in servicesList:
+      if "SPARK_THRIFTSERVER" in servicesList:
+        if not "HIVE_SERVER" in servicesList:
+          message = "SPARK_THRIFTSERVER requires HIVE services to be selected."
+          childItems.append( {"type": 'host-component', "level": 'ERROR', "message": message, "component-name": 'SPARK_THRIFTSERVER'} )
+
+      hmsHosts = self.__getHosts(componentsList, "HIVE_METASTORE") if "HIVE" in servicesList else []
+      sparkTsHosts = self.__getHosts(componentsList, "SPARK_THRIFTSERVER") if "SPARK" in servicesList else []
+
+      # if Spark Thrift Server is deployed but no Hive Metastore is deployed
+      if len(sparkTsHosts) > 0 and len(hmsHosts) == 0:
+        message = "SPARK_THRIFTSERVER requires HIVE_METASTORE to be selected/deployed."
+        childItems.append( { "type": 'host-component', "level": 'ERROR', "message": message, "component-name": 'SPARK_THRIFTSERVER' } )
+
+    parentItems.extend(childItems)
+    return parentItems
+
+  def __getHosts(self, componentsList, componentName):
+    host_lists = [component["hostnames"] for component in componentsList if
+                  component["component_name"] == componentName]
+    if host_lists and len(host_lists) > 0:
+      return host_lists[0]
+    else:
+      return []
+
+  def getServiceConfigurationRecommenderDict(self):
+    parentRecommendConfDict = super(HDP23StackAdvisor, self).getServiceConfigurationRecommenderDict()
+    childRecommendConfDict = {
+      "TEZ": self.recommendTezConfigurations,
+      "HDFS": self.recommendHDFSConfigurations,
+      "YARN": self.recommendYARNConfigurations,
+      "HIVE": self.recommendHIVEConfigurations,
+      "HBASE": self.recommendHBASEConfigurations,
+      "KAFKA": self.recommendKAFKAConfigurations,
+      "RANGER": self.recommendRangerConfigurations,
+      "RANGER_KMS": self.recommendRangerKMSConfigurations,
+      "STORM": self.recommendStormConfigurations,
+      "SQOOP": self.recommendSqoopConfigurations
+    }
+    parentRecommendConfDict.update(childRecommendConfDict)
+    return parentRecommendConfDict
+
+  def recommendTezConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP23StackAdvisor, self).recommendTezConfigurations(configurations, clusterData, services, hosts)
+
+    putTezProperty = self.putProperty(configurations, "tez-site")
+
+    if "HIVE" in self.getServiceNames(services):
+      if not "hive-site" in configurations:
+        self.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
+
+      if "hive-site" in configurations and "hive.tez.container.size" in configurations["hive-site"]["properties"]:
+        putTezProperty("tez.task.resource.memory.mb", configurations["hive-site"]["properties"]["hive.tez.container.size"])
+
+    # remove the 2 GB limit for tez.runtime.io.sort.mb
+    # in HDP 2.3 "tez.runtime.sorter.class" defaults to PIPELINED; otherwise, comment out the calculation code below
+    taskResourceMemory = int(configurations["tez-site"]["properties"]["tez.task.resource.memory.mb"])
+    # fit io.sort.mb into tenured regions
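+    # 0.8 approximates the tenured share of the task heap and 0.33 caps the sort
+    # buffer at a third of it, e.g. 4096 MB of task memory yields io.sort.mb ~= 1081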
+    putTezProperty("tez.runtime.io.sort.mb", int(taskResourceMemory * 0.8 * 0.33))
+
+    if "tez-site" in services["configurations"] and "tez.runtime.sorter.class" in services["configurations"]["tez-site"]["properties"]:
+      if services["configurations"]["tez-site"]["properties"]["tez.runtime.sorter.class"] == "LEGACY":
+        putTezAttribute = self.putPropertyAttribute(configurations, "tez-site")
+        putTezAttribute("tez.runtime.io.sort.mb", "maximum", 1800)
+    pass
+
+    serverProperties = services["ambari-server-properties"]
+    latest_tez_jar_version = None
+
+    server_host = socket.getfqdn()
+    for host in hosts["items"]:
+      if server_host == host["Hosts"]["host_name"]:
+        server_host = host["Hosts"]["public_host_name"]
+    server_port = '8080'
+    server_protocol = 'http'
+    views_dir = '/var/lib/ambari-server/resources/views/'
+
+    if serverProperties:
+      if 'client.api.port' in serverProperties:
+        server_port = serverProperties['client.api.port']
+      if 'views.dir' in serverProperties:
+        views_dir = serverProperties['views.dir']
+      if 'api.ssl' in serverProperties:
+        if serverProperties['api.ssl'].lower() == 'true':
+          server_protocol = 'https'
+
+      views_work_dir = os.path.join(views_dir, 'work')
+
+      if os.path.exists(views_work_dir) and os.path.isdir(views_work_dir):
+        last_version = '0.0.0'
+        for file in os.listdir(views_work_dir):
+          if fnmatch.fnmatch(file, 'TEZ{*}'):
+            current_version = file.lstrip("TEZ{").rstrip("}") # E.g.: TEZ{0.7.0.2.3.0.0-2154}
+            if self.versionCompare(current_version.replace("-", "."), last_version.replace("-", ".")) >= 0:
+              latest_tez_jar_version = current_version
+              last_version = current_version
+            pass
+        pass
+      pass
+    pass
+
+    if latest_tez_jar_version:
+      tez_url = '{0}://{1}:{2}/#/main/views/TEZ/{3}/TEZ_CLUSTER_INSTANCE'.format(server_protocol, server_host, server_port, latest_tez_jar_version)
+      putTezProperty("tez.tez-ui.history-url.base", tez_url)
+    pass
+
+    # TEZ JVM options
+    jvmGCParams = "-XX:+UseParallelGC"
+    if "ambari-server-properties" in services and "java.home" in services["ambari-server-properties"]:
+      # JDK8 needs different parameters
+      match = re.match(".*\/jdk(1\.\d+)[\-\_\.][^/]*$", services["ambari-server-properties"]["java.home"])
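+      # e.g. java.home=/usr/jdk64/jdk1.8.0_60 yields match.group(1) == "1.8"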
+      if match and len(match.groups()) > 0:
+        # Is version >= 1.8
+        versionSplits = re.split("\.", match.group(1))
+        if versionSplits and len(versionSplits) > 1 and int(versionSplits[0]) > 0 and int(versionSplits[1]) > 7:
+          jvmGCParams = "-XX:+UseG1GC -XX:+ResizeTLAB"
+    putTezProperty('tez.am.launch.cmd-opts', "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA " + jvmGCParams)
+    putTezProperty('tez.task.launch.cmd-opts', "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA " + jvmGCParams)
+
+
+  def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP23StackAdvisor, self).recommendHBASEConfigurations(configurations, clusterData, services, hosts)
+    putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
+    putHbaseSitePropertyAttributes = self.putPropertyAttribute(configurations, "hbase-site")
+    putHbaseEnvProperty = self.putProperty(configurations, "hbase-env", services)
+    putHbaseEnvPropertyAttributes = self.putPropertyAttribute(configurations, "hbase-env")
+
+    # bucket cache for 1.x is configured slightly differently, HBASE-11520
+    threshold = 23 # 2 GB is reserved for other offheap memory
+    if (int(clusterData["hbaseRam"]) > threshold):
+      # To enable cache - calculate values
+      regionserver_total_ram = int(clusterData["hbaseRam"]) * 1024
+      regionserver_heap_size = 20480
+      regionserver_max_direct_memory_size = regionserver_total_ram - regionserver_heap_size
+      hfile_block_cache_size = '0.4'
+      block_cache_heap = 8192 # int(regionserver_heap_size * hfile_block_cache_size)
+      hbase_regionserver_global_memstore_size = '0.4'
+      reserved_offheap_memory = 2048
+      bucketcache_offheap_memory = regionserver_max_direct_memory_size - reserved_offheap_memory
+      hbase_bucketcache_size = bucketcache_offheap_memory
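+      # e.g. hbaseRam = 32 (GB): total = 32768 MB, heap = 20480 MB,
+      # max direct memory = 12288 MB, bucket cache = 10240 MB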
+
+      # Set values in hbase-site
+      putHbaseSiteProperty('hfile.block.cache.size', hfile_block_cache_size)
+      putHbaseSiteProperty('hbase.regionserver.global.memstore.size', hbase_regionserver_global_memstore_size)
+      putHbaseSiteProperty('hbase.bucketcache.ioengine', 'offheap')
+      putHbaseSiteProperty('hbase.bucketcache.size', hbase_bucketcache_size)
+      # 2.2 stack method was called earlier, unset
+      putHbaseSitePropertyAttributes('hbase.bucketcache.percentage.in.combinedcache', 'delete', 'true')
+
+      # Enable in hbase-env
+      putHbaseEnvProperty('hbase_max_direct_memory_size', regionserver_max_direct_memory_size)
+      putHbaseEnvProperty('hbase_regionserver_heapsize', regionserver_heap_size)
+    else:
+      # Disable
+      putHbaseSitePropertyAttributes('hbase.bucketcache.ioengine', 'delete', 'true')
+      putHbaseSitePropertyAttributes('hbase.bucketcache.size', 'delete', 'true')
+      putHbaseSitePropertyAttributes('hbase.bucketcache.percentage.in.combinedcache', 'delete', 'true')
+
+      putHbaseEnvPropertyAttributes('hbase_max_direct_memory_size', 'delete', 'true')
+
+    if 'hbase-env' in services['configurations'] and 'phoenix_sql_enabled' in services['configurations']['hbase-env']['properties'] and \
+       'true' == services['configurations']['hbase-env']['properties']['phoenix_sql_enabled'].lower():
+      putHbaseSiteProperty("hbase.rpc.controllerfactory.class", "org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory")
+      putHbaseSiteProperty("hbase.region.server.rpc.scheduler.factory.class", "org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory")
+    else:
+      putHbaseSitePropertyAttributes('hbase.region.server.rpc.scheduler.factory.class', 'delete', 'true')
+
+  def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP23StackAdvisor, self).recommendHIVEConfigurations(configurations, clusterData, services, hosts)
+    putHiveSiteProperty = self.putProperty(configurations, "hive-site", services)
+    putHiveServerProperty = self.putProperty(configurations, "hiveserver2-site", services)
+    putHiveSitePropertyAttribute = self.putPropertyAttribute(configurations, "hive-site")
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # hive_security_authorization == 'ranger'
+    if str(configurations["hive-env"]["properties"]["hive_security_authorization"]).lower() == "ranger":
+      putHiveServerProperty("hive.security.authorization.manager", "org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizerFactory")
+
+    # TEZ JVM options
+    if "TEZ" in self.getServiceNames(services):
+      jvmGCParams = "-XX:+UseParallelGC"
+      if "ambari-server-properties" in services and "java.home" in services["ambari-server-properties"]:
+        # JDK8 needs different parameters
+        match = re.match(".*\/jdk(1\.\d+)[\-\_\.][^/]*$", services["ambari-server-properties"]["java.home"])
+        if match and len(match.groups()) > 0:
+          # Is version >= 1.8
+          versionSplits = re.split("\.", match.group(1))
+          if versionSplits and len(versionSplits) > 1 and int(versionSplits[0]) > 0 and int(versionSplits[1]) > 7:
+            jvmGCParams = "-XX:+UseG1GC -XX:+ResizeTLAB"
+      putHiveSiteProperty('hive.tez.java.opts', "-server -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA " + jvmGCParams + " -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps")
+
+    # if hive using sqla db, then we should add DataNucleus property
+    sqla_db_used = 'hive-env' in services['configurations'] and 'hive_database' in services['configurations']['hive-env']['properties'] and \
+                   services['configurations']['hive-env']['properties']['hive_database'] == 'Existing SQL Anywhere Database'
+    if sqla_db_used:
+      putHiveSiteProperty('datanucleus.rdbms.datastoreAdapterClassName','org.datanucleus.store.rdbms.adapter.SQLAnywhereAdapter')
+    else:
+      putHiveSitePropertyAttribute('datanucleus.rdbms.datastoreAdapterClassName', 'delete', 'true')
+
+    # Atlas
+    hooks_property = "hive.exec.post.hooks"
+    atlas_hook_class = "org.apache.atlas.hive.hook.HiveHook"
+    if hooks_property in configurations["hive-site"]["properties"]:
+      hooks_value = configurations["hive-site"]["properties"][hooks_property]
+    else:
+      hooks_value = ""
+
+
+    hive_hooks = [x.strip() for x in hooks_value.split(",")]
+    hive_hooks = [x for x in hive_hooks if x != ""]
+    is_atlas_present_in_cluster = "ATLAS" in servicesList
+
+    if is_atlas_present_in_cluster:
+      # Append atlas hook if not already present.
+      is_atlas_hook_in_config = atlas_hook_class in hive_hooks
+      if not is_atlas_hook_in_config:
+        hive_hooks.append(atlas_hook_class)
+    else:
+      # Remove the atlas hook since Atlas service is not present.
+      hive_hooks = [x for x in hive_hooks if x != atlas_hook_class]
+
+    # Convert hive_hooks back to a csv, unless there are 0 elements, which should be " "
+    hooks_value = " " if len(hive_hooks) == 0 else ",".join(hive_hooks)
+    putHiveSiteProperty(hooks_property, hooks_value)
+
+    # This is no longer used in HDP 2.5, but still needed in HDP 2.3 and 2.4
+    atlas_server_host_info = self.getHostWithComponent("ATLAS", "ATLAS_SERVER", services, hosts)
+    if is_atlas_present_in_cluster and atlas_server_host_info:
+      atlas_rest_host = atlas_server_host_info['Hosts']['host_name']
+      scheme = "http"
+      metadata_port = "21000"
+      atlas_server_default_https_port = "21443"
+      tls_enabled = "false"
+      if 'application-properties' in services['configurations']:
+        if 'atlas.enableTLS' in services['configurations']['application-properties']['properties']:
+          tls_enabled = services['configurations']['application-properties']['properties']['atlas.enableTLS']
+        if 'atlas.server.http.port' in services['configurations']['application-properties']['properties']:
+          metadata_port = services['configurations']['application-properties']['properties']['atlas.server.http.port']
+        if tls_enabled.lower() == "true":
+          scheme = "https"
+          if 'atlas.server.https.port' in services['configurations']['application-properties']['properties']:
+            metadata_port =  services['configurations']['application-properties']['properties']['atlas.server.https.port']
+          else:
+            metadata_port = atlas_server_default_https_port
+      putHiveSiteProperty('atlas.rest.address', '{0}://{1}:{2}'.format(scheme, atlas_rest_host, metadata_port))
+    else:
+      putHiveSitePropertyAttribute('atlas.cluster.name', 'delete', 'true')
+      putHiveSitePropertyAttribute('atlas.rest.address', 'delete', 'true')
+
+  def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP23StackAdvisor, self).recommendHDFSConfigurations(configurations, clusterData, services, hosts)
+
+    putHdfsSiteProperty = self.putProperty(configurations, "hdfs-site", services)
+    putHdfsSitePropertyAttribute = self.putPropertyAttribute(configurations, "hdfs-site")
+
+    if ('ranger-hdfs-plugin-properties' in services['configurations']) and ('ranger-hdfs-plugin-enabled' in services['configurations']['ranger-hdfs-plugin-properties']['properties']):
+      rangerPluginEnabled = ''
+      if 'ranger-hdfs-plugin-properties' in configurations and 'ranger-hdfs-plugin-enabled' in  configurations['ranger-hdfs-plugin-properties']['properties']:
+        rangerPluginEnabled = configurations['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled']
+      elif 'ranger-hdfs-plugin-properties' in services['configurations'] and 'ranger-hdfs-plugin-enabled' in services['configurations']['ranger-hdfs-plugin-properties']['properties']:
+        rangerPluginEnabled = services['configurations']['ranger-hdfs-plugin-properties']['properties']['ranger-hdfs-plugin-enabled']
+
+      if rangerPluginEnabled and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+        putHdfsSiteProperty("dfs.namenode.inode.attributes.provider.class",'org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer')
+      else:
+        putHdfsSitePropertyAttribute('dfs.namenode.inode.attributes.provider.class', 'delete', 'true')
+    else:
+      putHdfsSitePropertyAttribute('dfs.namenode.inode.attributes.provider.class', 'delete', 'true')
+
+  def recommendKAFKAConfigurations(self, configurations, clusterData, services, hosts):
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    kafka_broker = getServicesSiteProperties(services, "kafka-broker")
+
+    security_enabled = self.isSecurityEnabled(services)
+
+    putKafkaBrokerProperty = self.putProperty(configurations, "kafka-broker", services)
+    putKafkaLog4jProperty = self.putProperty(configurations, "kafka-log4j", services)
+    putKafkaBrokerAttributes = self.putPropertyAttribute(configurations, "kafka-broker")
+
+    if security_enabled:
+      kafka_env = getServicesSiteProperties(services, "kafka-env")
+      kafka_user = kafka_env.get('kafka_user') if kafka_env is not None else None
+
+      if kafka_user is not None:
+        kafka_super_users = kafka_broker.get('super.users') if kafka_broker is not None else None
+
+        # super.users is expected to be formatted as:  User:user1;User:user2
+        if kafka_super_users is not None and kafka_super_users != '':
+          # Parse kafka_super_users to get a set of unique user names and rebuild the property value
+          user_names = set()
+          user_names.add(kafka_user)
+          for match in re.findall('User:([^;]*)', kafka_super_users):
+            user_names.add(match)
+          kafka_super_users = 'User:' + ";User:".join(user_names)
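+          # e.g. an existing 'User:admin' plus kafka_user 'kafka' is rebuilt as
+          # 'User:admin;User:kafka' (set iteration order is not guaranteed)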
+        else:
+          kafka_super_users = 'User:' + kafka_user
+
+        putKafkaBrokerProperty("super.users", kafka_super_users)
+
+      putKafkaBrokerProperty("principal.to.local.class", "kafka.security.auth.KerberosPrincipalToLocal")
+      putKafkaBrokerProperty("security.inter.broker.protocol", "PLAINTEXTSASL")
+      putKafkaBrokerProperty("zookeeper.set.acl", "true")
+
+    else:  # not security_enabled
+      # remove unneeded properties
+      putKafkaBrokerAttributes('super.users', 'delete', 'true')
+      putKafkaBrokerAttributes('principal.to.local.class', 'delete', 'true')
+      putKafkaBrokerAttributes('security.inter.broker.protocol', 'delete', 'true')
+
+    # Update ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled to match ranger-env/ranger-kafka-plugin-enabled
+    if "ranger-env" in services["configurations"] \
+      and "ranger-kafka-plugin-properties" in services["configurations"] \
+      and "ranger-kafka-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putKafkaRangerPluginProperty = self.putProperty(configurations, "ranger-kafka-plugin-properties", services)
+      ranger_kafka_plugin_enabled = services["configurations"]["ranger-env"]["properties"]["ranger-kafka-plugin-enabled"]
+      putKafkaRangerPluginProperty("ranger-kafka-plugin-enabled", ranger_kafka_plugin_enabled)
+
+    # Determine if the Ranger/Kafka Plugin is enabled
+    ranger_plugin_enabled = "RANGER" in servicesList
+    # Only if the RANGER service is installed....
+    if ranger_plugin_enabled:
+      # Reset to False first so the value does not stay True when the conditions
+      # below are not met; only a confirmed flag sets authorizer.class.name to RangerKafkaAuthorizer
+      ranger_plugin_enabled = False
+
+      # If ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled is among the
+      # recommended configurations, use it to determine whether the Ranger/Kafka plug-in is enabled
+      if 'ranger-kafka-plugin-properties' in configurations and \
+          'ranger-kafka-plugin-enabled' in configurations['ranger-kafka-plugin-properties']['properties']:
+        ranger_plugin_enabled = configurations['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'yes'
+      # If ranger-kafka-plugin-properties/ranger-kafka-plugin-enabled was not changed,
+      # fall back to the service configurations to determine whether the plug-in is enabled
+      elif 'ranger-kafka-plugin-properties' in services['configurations'] and \
+          'ranger-kafka-plugin-enabled' in services['configurations']['ranger-kafka-plugin-properties']['properties']:
+        ranger_plugin_enabled = services['configurations']['ranger-kafka-plugin-properties']['properties']['ranger-kafka-plugin-enabled'].lower() == 'yes'
+
+    # Determine the value for kafka-broker/authorizer.class.name
+    if ranger_plugin_enabled:
+      # If the Ranger plugin for Kafka is enabled, set authorizer.class.name to
+      # "org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer" whether Kerberos is
+      # enabled or not.
+      putKafkaBrokerProperty("authorizer.class.name", 'org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer')
+    elif security_enabled:
+      putKafkaBrokerProperty("authorizer.class.name", 'kafka.security.auth.SimpleAclAuthorizer')
+    else:
+      putKafkaBrokerAttributes('authorizer.class.name', 'delete', 'true')
+
+    # If AMS is part of the services, use KafkaTimelineMetricsReporter for metric reporting. Default is ''.
+    if "AMBARI_METRICS" in servicesList:
+      putKafkaBrokerProperty('kafka.metrics.reporters', 'org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter')
+
+    if ranger_plugin_enabled:
+      kafkaLog4jRangerLines = [{
+        "name": "log4j.appender.rangerAppender",
+        "value": "org.apache.log4j.DailyRollingFileAppender"
+        },
+        {
+          "name": "log4j.appender.rangerAppender.DatePattern",
+          "value": "'.'yyyy-MM-dd-HH"
+        },
+        {
+          "name": "log4j.appender.rangerAppender.File",
+          "value": "${kafka.logs.dir}/ranger_kafka.log"
+        },
+        {
+          "name": "log4j.appender.rangerAppender.layout",
+          "value": "org.apache.log4j.PatternLayout"
+        },
+        {
+          "name": "log4j.appender.rangerAppender.layout.ConversionPattern",
+          "value": "%d{ISO8601} %p [%t] %C{6} (%F:%L) - %m%n"
+        },
+        {
+          "name": "log4j.logger.org.apache.ranger",
+          "value": "INFO, rangerAppender"
+        }]
+
+      # change kafka-log4j when ranger plugin is installed
+      if 'kafka-log4j' in services['configurations'] and 'content' in services['configurations']['kafka-log4j']['properties']:
+        kafkaLog4jContent = services['configurations']['kafka-log4j']['properties']['content']
+        for item in range(len(kafkaLog4jRangerLines)):
+          if kafkaLog4jRangerLines[item]["name"] not in kafkaLog4jContent:
+            kafkaLog4jContent+= '\n' + kafkaLog4jRangerLines[item]["name"] + '=' + kafkaLog4jRangerLines[item]["value"]
+        putKafkaLog4jProperty("content",kafkaLog4jContent)
+
+  def recommendRangerKMSConfigurations(self, configurations, clusterData, services, hosts):
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    putRangerKmsDbksProperty = self.putProperty(configurations, "dbks-site", services)
+    putRangerKmsProperty = self.putProperty(configurations, "kms-properties", services)
+    kmsEnvProperties = getSiteProperties(services['configurations'], 'kms-env')
+    putCoreSiteProperty = self.putProperty(configurations, "core-site", services)
+    putCoreSitePropertyAttribute = self.putPropertyAttribute(configurations, "core-site")
+    putRangerKmsAuditProperty = self.putProperty(configurations, "ranger-kms-audit", services)
+
+    if 'kms-properties' in services['configurations'] and ('DB_FLAVOR' in services['configurations']['kms-properties']['properties']):
+
+      rangerKmsDbFlavor = services['configurations']["kms-properties"]["properties"]["DB_FLAVOR"]
+
+      if ('db_host' in services['configurations']['kms-properties']['properties']) and ('db_name' in services['configurations']['kms-properties']['properties']):
+
+        rangerKmsDbHost =   services['configurations']["kms-properties"]["properties"]["db_host"]
+        rangerKmsDbName =   services['configurations']["kms-properties"]["properties"]["db_name"]
+
+        ranger_kms_db_url_dict = {
+          'MYSQL': {'ranger.ks.jpa.jdbc.driver': 'com.mysql.jdbc.Driver',
+                    'ranger.ks.jpa.jdbc.url': 'jdbc:mysql://' + self.getDBConnectionHostPort(rangerKmsDbFlavor, rangerKmsDbHost) + '/' + rangerKmsDbName},
+          'ORACLE': {'ranger.ks.jpa.jdbc.driver': 'oracle.jdbc.driver.OracleDriver',
+                     'ranger.ks.jpa.jdbc.url': 'jdbc:oracle:thin:@' + self.getOracleDBConnectionHostPort(rangerKmsDbFlavor, rangerKmsDbHost, rangerKmsDbName)},
+          'POSTGRES': {'ranger.ks.jpa.jdbc.driver': 'org.postgresql.Driver',
+                       'ranger.ks.jpa.jdbc.url': 'jdbc:postgresql://' + self.getDBConnectionHostPort(rangerKmsDbFlavor, rangerKmsDbHost) + '/' + rangerKmsDbName},
+          'MSSQL': {'ranger.ks.jpa.jdbc.driver': 'com.microsoft.sqlserver.jdbc.SQLServerDriver',
+                    'ranger.ks.jpa.jdbc.url': 'jdbc:sqlserver://' + self.getDBConnectionHostPort(rangerKmsDbFlavor, rangerKmsDbHost) + ';databaseName=' + rangerKmsDbName},
+          'SQLA': {'ranger.ks.jpa.jdbc.driver': 'sap.jdbc4.sqlanywhere.IDriver',
+                   'ranger.ks.jpa.jdbc.url': 'jdbc:sqlanywhere:host=' + self.getDBConnectionHostPort(rangerKmsDbFlavor, rangerKmsDbHost) + ';database=' + rangerKmsDbName}
+        }
+
+        rangerKmsDbProperties = ranger_kms_db_url_dict.get(rangerKmsDbFlavor, ranger_kms_db_url_dict['MYSQL'])
+        for key in rangerKmsDbProperties:
+          putRangerKmsDbksProperty(key, rangerKmsDbProperties.get(key))
+
+    if kmsEnvProperties and self.checkSiteProperties(kmsEnvProperties, 'kms_user') and 'KERBEROS' in servicesList:
+      kmsUser = kmsEnvProperties['kms_user']
+      kmsUserOld = getOldValue(self, services, 'kms-env', 'kms_user')
+      putCoreSiteProperty('hadoop.proxyuser.{0}.groups'.format(kmsUser), '*')
+      if kmsUserOld is not None and kmsUser != kmsUserOld:
+        putCoreSitePropertyAttribute("hadoop.proxyuser.{0}.groups".format(kmsUserOld), 'delete', 'true')
+        services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(kmsUserOld)})
+        services["forced-configurations"].append({"type" : "core-site", "name" : "hadoop.proxyuser.{0}.groups".format(kmsUser)})
+
+    if "HDFS" in servicesList:
+      if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']):
+        default_fs = services['configurations']['core-site']['properties']['fs.defaultFS']
+        putRangerKmsAuditProperty('xasecure.audit.destination.hdfs.dir', '{0}/{1}/{2}'.format(default_fs,'ranger','audit'))
+
+
+  def getOracleDBConnectionHostPort(self, db_type, db_host, rangerDbName):
+    connection_string = self.getDBConnectionHostPort(db_type, db_host)
+    colon_count = db_host.count(':')
+    if colon_count == 1 and '/' in db_host:
+      connection_string = "//" + connection_string
+    elif colon_count == 0 or colon_count == 1:
+      connection_string = "//" + connection_string + "/" + rangerDbName if rangerDbName else "//" + connection_string
+
+    return connection_string
+
+  def getDBConnectionHostPort(self, db_type, db_host):
+    connection_string = ""
+    if db_type is None or db_type == "":
+      return connection_string
+    else:
+      colon_count = db_host.count(':')
+      if colon_count == 0:
+        if db_type in DB_TYPE_DEFAULT_PORT_MAP:
+          connection_string = db_host + ":" + DB_TYPE_DEFAULT_PORT_MAP[db_type]
+        else:
+          connection_string = db_host
+      elif colon_count == 1:
+        connection_string = db_host
+      elif colon_count == 2:
+        connection_string = db_host
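+      # e.g. ('MYSQL', 'dbhost') resolves to 'dbhost:3306'; a host that already
+      # carries a port, such as 'dbhost:3307', is returned unchanged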
+
+    return connection_string
+
+
+  def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP23StackAdvisor, self).recommendRangerConfigurations(configurations, clusterData, services, hosts)
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    putRangerAdminProperty = self.putProperty(configurations, "ranger-admin-site", services)
+    putRangerEnvProperty = self.putProperty(configurations, "ranger-env", services)
+    putRangerUgsyncSite = self.putProperty(configurations, "ranger-ugsync-site", services)
+
+    if 'admin-properties' in services['configurations'] and ('DB_FLAVOR' in services['configurations']['admin-properties']['properties'])\
+      and ('db_host' in services['configurations']['admin-properties']['properties']) and ('db_name' in services['configurations']['admin-properties']['properties']):
+
+      rangerDbFlavor = services['configurations']["admin-properties"]["properties"]["DB_FLAVOR"]
+      rangerDbHost =   services['configurations']["admin-properties"]["properties"]["db_host"]
+      rangerDbName =   services['configurations']["admin-properties"]["properties"]["db_name"]
+      ranger_db_url_dict = {
+        'MYSQL': {'ranger.jpa.jdbc.driver': 'com.mysql.jdbc.Driver',
+                  'ranger.jpa.jdbc.url': 'jdbc:mysql://' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + '/' + rangerDbName},
+        'ORACLE': {'ranger.jpa.jdbc.driver': 'oracle.jdbc.driver.OracleDriver',
+                   'ranger.jpa.jdbc.url': 'jdbc:oracle:thin:@' + self.getOracleDBConnectionHostPort(rangerDbFlavor, rangerDbHost, rangerDbName)},
+        'POSTGRES': {'ranger.jpa.jdbc.driver': 'org.postgresql.Driver',
+                     'ranger.jpa.jdbc.url': 'jdbc:postgresql://' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + '/' + rangerDbName},
+        'MSSQL': {'ranger.jpa.jdbc.driver': 'com.microsoft.sqlserver.jdbc.SQLServerDriver',
+                  'ranger.jpa.jdbc.url': 'jdbc:sqlserver://' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + ';databaseName=' + rangerDbName},
+        'SQLA': {'ranger.jpa.jdbc.driver': 'sap.jdbc4.sqlanywhere.IDriver',
+                 'ranger.jpa.jdbc.url': 'jdbc:sqlanywhere:host=' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + ';database=' + rangerDbName}
+      }
+      rangerDbProperties = ranger_db_url_dict.get(rangerDbFlavor, ranger_db_url_dict['MYSQL'])
+      for key in rangerDbProperties:
+        putRangerAdminProperty(key, rangerDbProperties.get(key))
+
+      if 'admin-properties' in services['configurations'] and ('DB_FLAVOR' in services['configurations']['admin-properties']['properties']) \
+        and ('db_host' in services['configurations']['admin-properties']['properties']):
+
+        rangerDbFlavor = services['configurations']["admin-properties"]["properties"]["DB_FLAVOR"]
+        rangerDbHost =   services['configurations']["admin-properties"]["properties"]["db_host"]
+        ranger_db_privelege_url_dict = {
+          'MYSQL': {'ranger_privelege_user_jdbc_url': 'jdbc:mysql://' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost)},
+          'ORACLE': {'ranger_privelege_user_jdbc_url': 'jdbc:oracle:thin:@' + self.getOracleDBConnectionHostPort(rangerDbFlavor, rangerDbHost, None)},
+          'POSTGRES': {'ranger_privelege_user_jdbc_url': 'jdbc:postgresql://' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + '/postgres'},
+          'MSSQL': {'ranger_privelege_user_jdbc_url': 'jdbc:sqlserver://' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + ';'},
+          'SQLA': {'ranger_privelege_user_jdbc_url': 'jdbc:sqlanywhere:host=' + self.getDBConnectionHostPort(rangerDbFlavor, rangerDbHost) + ';'}
+        }
+        rangerPrivelegeDbProperties = ranger_db_privelege_url_dict.get(rangerDbFlavor, ranger_db_privelege_url_dict['MYSQL'])
+        for key in rangerPrivelegeDbProperties:
+          putRangerEnvProperty(key, rangerPrivelegeDbProperties.get(key))
+
+    # Recommend ldap settings based on ambari.properties configuration
+    if 'ambari-server-properties' in services and \
+        'ambari.ldap.isConfigured' in services['ambari-server-properties'] and \
+        services['ambari-server-properties']['ambari.ldap.isConfigured'].lower() == "true":
+      serverProperties = services['ambari-server-properties']
+      if 'authentication.ldap.baseDn' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.searchBase', serverProperties['authentication.ldap.baseDn'])
+      if 'authentication.ldap.groupMembershipAttr' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.group.memberattributename', serverProperties['authentication.ldap.groupMembershipAttr'])
+      if 'authentication.ldap.groupNamingAttr' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.group.nameattribute', serverProperties['authentication.ldap.groupNamingAttr'])
+      if 'authentication.ldap.groupObjectClass' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.group.objectclass', serverProperties['authentication.ldap.groupObjectClass'])
+      if 'authentication.ldap.managerDn' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.binddn', serverProperties['authentication.ldap.managerDn'])
+      if 'authentication.ldap.primaryUrl' in serverProperties:
+        ldap_protocol =  'ldap://'
+        if 'authentication.ldap.useSSL' in serverProperties and serverProperties['authentication.ldap.useSSL'] == 'true':
+          ldap_protocol =  'ldaps://'
+        ldapUrl = ldap_protocol + serverProperties['authentication.ldap.primaryUrl'] if serverProperties['authentication.ldap.primaryUrl'] else serverProperties['authentication.ldap.primaryUrl']
+        putRangerUgsyncSite('ranger.usersync.ldap.url', ldapUrl)
+      if 'authentication.ldap.userObjectClass' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.user.objectclass', serverProperties['authentication.ldap.userObjectClass'])
+      if 'authentication.ldap.usernameAttribute' in serverProperties:
+        putRangerUgsyncSite('ranger.usersync.ldap.user.nameattribute', serverProperties['authentication.ldap.usernameAttribute'])
+
+
+    # Recommend Ranger Authentication method
+    authMap = {
+      'org.apache.ranger.unixusersync.process.UnixUserGroupBuilder': 'UNIX',
+      'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder': 'LDAP'
+    }
+
+    if 'ranger-ugsync-site' in services['configurations'] and 'ranger.usersync.source.impl.class' in services['configurations']["ranger-ugsync-site"]["properties"]:
+      rangerUserSyncClass = services['configurations']["ranger-ugsync-site"]["properties"]["ranger.usersync.source.impl.class"]
+      if rangerUserSyncClass in authMap:
+        rangerSqlConnectorProperty = authMap.get(rangerUserSyncClass)
+        putRangerAdminProperty('ranger.authentication.method', rangerSqlConnectorProperty)
+
+
+    if 'ranger-env' in services['configurations'] and 'is_solrCloud_enabled' in services['configurations']["ranger-env"]["properties"]:
+      isSolrCloudEnabled = services['configurations']["ranger-env"]["properties"]["is_solrCloud_enabled"]  == "true"
+    else:
+      isSolrCloudEnabled = False
+
+    if isSolrCloudEnabled:
+      zookeeper_host_port = self.getZKHostPortString(services)
+      ranger_audit_zk_port = ''
+      if zookeeper_host_port:
+        ranger_audit_zk_port = '{0}/{1}'.format(zookeeper_host_port, 'ranger_audits')
+        putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port)
+    else:
+      putRangerAdminProperty('ranger.audit.solr.zookeepers', 'NONE')
+
+    # Recommend ranger.audit.solr.zookeepers and xasecure.audit.destination.hdfs.dir
+    include_hdfs = "HDFS" in servicesList
+    if include_hdfs:
+      if 'core-site' in services['configurations'] and ('fs.defaultFS' in services['configurations']['core-site']['properties']):
+        default_fs = services['configurations']['core-site']['properties']['fs.defaultFS']
+        putRangerEnvProperty('xasecure.audit.destination.hdfs.dir', '{0}/{1}/{2}'.format(default_fs,'ranger','audit'))
+
+    # Recommend Ranger supported service's audit properties
+    ranger_services = [
+      {'service_name': 'HDFS', 'audit_file': 'ranger-hdfs-audit'},
+      {'service_name': 'YARN', 'audit_file': 'ranger-yarn-audit'},
+      {'service_name': 'HBASE', 'audit_file': 'ranger-hbase-audit'},
+      {'service_name': 'HIVE', 'audit_file': 'ranger-hive-audit'},
+      {'service_name': 'KNOX', 'audit_file': 'ranger-knox-audit'},
+      {'service_name': 'KAFKA', 'audit_file': 'ranger-kafka-audit'},
+      {'service_name': 'STORM', 'audit_file': 'ranger-storm-audit'}
+    ]
+
+    for item in range(len(ranger_services)):
+      if ranger_services[item]['service_name'] in servicesList:
+        component_audit_file =  ranger_services[item]['audit_file']
+        if component_audit_file in services["configurations"]:
+          ranger_audit_dict = [
+            {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.db', 'target_configname': 'xasecure.audit.destination.db'},
+            {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs', 'target_configname': 'xasecure.audit.destination.hdfs'},
+            {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.hdfs.dir', 'target_configname': 'xasecure.audit.destination.hdfs.dir'},
+            {'filename': 'ranger-env', 'configname': 'xasecure.audit.destination.solr', 'target_configname': 'xasecure.audit.destination.solr'},
+            {'filename': 'ranger-admin-site', 'configname': 'ranger.audit.solr.urls', 'target_configname': 'xasecure.audit.destination.solr.urls'},
+            {'filename': 'ranger-admin-site', 'configname': 'ranger.audit.solr.zookeepers', 'target_configname': 'xasecure.audit.destination.solr.zookeepers'}
+          ]
+          putRangerAuditProperty = self.putProperty(configurations, component_audit_file, services)
+
+          for item in ranger_audit_dict:
+            if item['filename'] in services["configurations"] and item['configname'] in  services["configurations"][item['filename']]["properties"]:
+              if item['filename'] in configurations and item['configname'] in  configurations[item['filename']]["properties"]:
+                rangerAuditProperty = configurations[item['filename']]["properties"][item['configname']]
+              else:
+                rangerAuditProperty = services["configurations"][item['filename']]["properties"][item['configname']]
+              putRangerAuditProperty(item['target_configname'], rangerAuditProperty)
+
+    audit_solr_flag = 'false'
+    audit_db_flag = 'false'
+    ranger_audit_source_type = 'solr'
+    if 'ranger-env' in services['configurations'] and 'xasecure.audit.destination.solr' in services['configurations']["ranger-env"]["properties"]:
+      audit_solr_flag = services['configurations']["ranger-env"]["properties"]['xasecure.audit.destination.solr']
+    if 'ranger-env' in services['configurations'] and 'xasecure.audit.destination.db' in services['configurations']["ranger-env"]["properties"]:
+      audit_db_flag = services['configurations']["ranger-env"]["properties"]['xasecure.audit.destination.db']
+
+    if audit_db_flag == 'true' and audit_solr_flag == 'false':
+      ranger_audit_source_type = 'db'
+    putRangerAdminProperty('ranger.audit.source.type',ranger_audit_source_type)
+
+    knox_host = 'localhost'
+    knox_port = '8443'
+    if 'KNOX' in servicesList:
+      knox_hosts = self.getComponentHostNames(services, "KNOX", "KNOX_GATEWAY")
+      knox_host = knox_hosts[0]
+      if 'gateway-site' in services['configurations'] and 'gateway.port' in services['configurations']["gateway-site"]["properties"]:
+        knox_port = services['configurations']["gateway-site"]["properties"]['gateway.port']
+      putRangerAdminProperty('ranger.sso.providerurl', 'https://{0}:{1}/gateway/knoxsso/api/v1/websso'.format(knox_host, knox_port))
+
+
+  def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP23StackAdvisor, self).recommendYARNConfigurations(configurations, clusterData, services, hosts)
+    putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
+    putYarnSitePropertyAttributes = self.putPropertyAttribute(configurations, "yarn-site")
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    if "tez-site" not in services["configurations"]:
+      putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes', '')
+    else:
+      putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes', 'org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl')
+
+    if "ranger-env" in services["configurations"] and "ranger-yarn-plugin-properties" in services["configurations"] and \
+        "ranger-yarn-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      putYarnRangerPluginProperty = self.putProperty(configurations, "ranger-yarn-plugin-properties", services)
+      rangerEnvYarnPluginProperty = services["configurations"]["ranger-env"]["properties"]["ranger-yarn-plugin-enabled"]
+      putYarnRangerPluginProperty("ranger-yarn-plugin-enabled", rangerEnvYarnPluginProperty)
+    rangerPluginEnabled = ''
+    if 'ranger-yarn-plugin-properties' in configurations and 'ranger-yarn-plugin-enabled' in  configurations['ranger-yarn-plugin-properties']['properties']:
+      rangerPluginEnabled = configurations['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled']
+    elif 'ranger-yarn-plugin-properties' in services['configurations'] and 'ranger-yarn-plugin-enabled' in services['configurations']['ranger-yarn-plugin-properties']['properties']:
+      rangerPluginEnabled = services['configurations']['ranger-yarn-plugin-properties']['properties']['ranger-yarn-plugin-enabled']
+
+    if rangerPluginEnabled and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+      putYarnSiteProperty('yarn.acl.enable','true')
+      putYarnSiteProperty('yarn.authorization-provider','org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer')
+    else:
+      putYarnSitePropertyAttributes('yarn.authorization-provider', 'delete', 'true')
+
+    if 'yarn-site' in services["configurations"] and 'yarn.resourcemanager.proxy-user-privileges.enabled' in services["configurations"]["yarn-site"]["properties"]:
+      if self.isSecurityEnabled(services):
+        # enable proxy-user privileges for secure clusters for long-running services (spark streaming etc)
+        putYarnSiteProperty('yarn.resourcemanager.proxy-user-privileges.enabled', 'true')
+        if 'RANGER_KMS' in servicesList:
+          # disable proxy-user privileges on secure clusters as it does not work with TDE
+          putYarnSiteProperty('yarn.resourcemanager.proxy-user-privileges.enabled', 'false')
+      else:
+        putYarnSiteProperty('yarn.resourcemanager.proxy-user-privileges.enabled', 'false')
+
+
+  def recommendSqoopConfigurations(self, configurations, clusterData, services, hosts):
+    putSqoopSiteProperty = self.putProperty(configurations, "sqoop-site", services)
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if "ATLAS" in servicesList:
+      putSqoopSiteProperty('sqoop.job.data.publish.class', 'org.apache.atlas.sqoop.hook.SqoopHook')
+
+  def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP23StackAdvisor, self).recommendStormConfigurations(configurations, clusterData, services, hosts)
+    putStormStartupProperty = self.putProperty(configurations, "storm-site", services)
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    if "storm-site" in services["configurations"]:
+      # atlas
+      notifier_plugin_property = "storm.topology.submission.notifier.plugin.class"
+      if notifier_plugin_property in services["configurations"]["storm-site"]["properties"] and \
+         services["configurations"]["storm-site"]["properties"][notifier_plugin_property] is not None:
+
+        notifier_plugin_value = services["configurations"]["storm-site"]["properties"][notifier_plugin_property]
+      else:
+        notifier_plugin_value = " "
+
+      atlas_is_present = "ATLAS" in servicesList
+      atlas_hook_class = "org.apache.atlas.storm.hook.StormAtlasHook"
+      atlas_hook_is_set = atlas_hook_class in notifier_plugin_value
+
+      if atlas_is_present and not atlas_hook_is_set:
+        notifier_plugin_value = atlas_hook_class if notifier_plugin_value == " " else ",".join([notifier_plugin_value, atlas_hook_class])
+
+      if not atlas_is_present and atlas_hook_is_set:
+        application_classes = [item for item in notifier_plugin_value.split(",") if item != atlas_hook_class and item != " "]
+        notifier_plugin_value = ",".join(application_classes) if application_classes else " "
+
+      if notifier_plugin_value.strip() != "":
+        putStormStartupProperty(notifier_plugin_property, notifier_plugin_value)
+      else:
+        putStormStartupPropertyAttribute = self.putPropertyAttribute(configurations, "storm-site")
+        putStormStartupPropertyAttribute(notifier_plugin_property, 'delete', 'true')
+
+  def getServiceConfigurationValidators(self):
+    parentValidators = super(HDP23StackAdvisor, self).getServiceConfigurationValidators()
+    childValidators = {
+      "HDFS": {"hdfs-site": self.validateHDFSConfigurations},
+      "HIVE": {"hiveserver2-site": self.validateHiveServer2Configurations,
+               "hive-site": self.validateHiveConfigurations},
+      "HBASE": {"hbase-site": self.validateHBASEConfigurations},
+      "KAFKA": {"kafka-broker": self.validateKAFKAConfigurations},
+      "YARN": {"yarn-site": self.validateYARNConfigurations},
+      "RANGER": {"admin-properties": self.validateRangerAdminConfigurations,
+                 "ranger-env": self.validateRangerConfigurationsEnv}
+    }
+    self.mergeValidators(parentValidators, childValidators)
+    return parentValidators
+
+  def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    parentValidationProblems = super(HDP23StackAdvisor, self).validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, hosts)
+
+    # We cannot access the hadoop.security.authentication property from the
+    # other config (core-site), so we rely on a different heuristic here
+    hdfs_site = properties
+    validationItems = [] #Adding Ranger Plugin logic here
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hdfs-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties['ranger-hdfs-plugin-enabled'] if ranger_plugin_properties else 'No'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+      if 'dfs.namenode.inode.attributes.provider.class' not in hdfs_site or \
+        hdfs_site['dfs.namenode.inode.attributes.provider.class'].lower() != 'org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer'.lower():
+        validationItems.append({"config-name": 'dfs.namenode.inode.attributes.provider.class',
+                                    "item": self.getWarnItem(
+                                      "dfs.namenode.inode.attributes.provider.class needs to be set to 'org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer' if Ranger HDFS Plugin is enabled.")})
+
+    validationProblems = self.toConfigurationValidationProblems(validationItems, "hdfs-site")
+    validationProblems.extend(parentValidationProblems)
+    return validationProblems
+
+
+  def validateHiveConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    parentValidationProblems = super(HDP23StackAdvisor, self).validateHiveConfigurations(properties, recommendedDefaults, configurations, services, hosts)
+    hive_site = properties
+    hive_env_properties = getSiteProperties(configurations, "hive-env")
+    validationItems = []
+    sqla_db_used = "hive_database" in hive_env_properties and \
+                   hive_env_properties['hive_database'] == 'Existing SQL Anywhere Database'
+    prop_name = "datanucleus.rdbms.datastoreAdapterClassName"
+    prop_value = "org.datanucleus.store.rdbms.adapter.SQLAnywhereAdapter"
+    if sqla_db_used:
+      if not prop_name in hive_site:
+        validationItems.append({"config-name": prop_name,
+                              "item": self.getWarnItem(
+                              "If Hive is using a SQL Anywhere database," \
+                              " {0} needs to be added with value {1}".format(prop_name,prop_value))})
+      elif prop_name in hive_site and hive_site[prop_name] != "org.datanucleus.store.rdbms.adapter.SQLAnywhereAdapter":
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                  "If Hive is using a SQL Anywhere database," \
+                                  " {0} needs to be set to {1}".format(prop_name,prop_value))})
+
+    configurationValidationProblems = self.toConfigurationValidationProblems(validationItems, "hive-site")
+    configurationValidationProblems.extend(parentValidationProblems)
+    return configurationValidationProblems
+
+  def validateHiveServer2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    super(HDP23StackAdvisor, self).validateHiveServer2Configurations(properties, recommendedDefaults, configurations, services, hosts)
+    hive_server2 = properties
+    validationItems = []
+    #Adding Ranger Plugin logic here
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hive-plugin-properties")
+    hive_env_properties = getSiteProperties(configurations, "hive-env")
+    ranger_plugin_enabled = 'hive_security_authorization' in hive_env_properties and hive_env_properties['hive_security_authorization'].lower() == 'ranger'
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Add these validations only if the RANGER service is present.
+    if "RANGER" in servicesList:
+      # Validations for when the Ranger Hive plugin is enabled.
+      if ranger_plugin_enabled:
+        prop_name = 'hive.security.authorization.manager'
+        prop_val = "org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizerFactory"
+        if prop_name in hive_server2 and hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger Hive Plugin is enabled,"\
+                                  " {0} under hiveserver2-site needs to be set to {1}".format(prop_name, prop_val))})
+        prop_name = 'hive.security.authenticator.manager'
+        prop_val = "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator"
+        if prop_name in hive_server2 and hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger Hive Plugin is enabled,"\
+                                  " {0} under hiveserver2-site needs to be set to {1}".format(prop_name, prop_val))})
+        prop_name = 'hive.security.authorization.enabled'
+        prop_val = 'true'
+        if prop_name in hive_server2 and hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger Hive Plugin is enabled,"\
+                                  " {0} under hiveserver2-site needs to be set to {1}".format(prop_name, prop_val))})
+        prop_name = 'hive.conf.restricted.list'
+        prop_vals = 'hive.security.authorization.enabled,hive.security.authorization.manager,hive.security.authenticator.manager'.split(',')
+        current_vals = []
+        missing_vals = []
+        if hive_server2 and prop_name in hive_server2:
+          current_vals = hive_server2[prop_name].split(',')
+          current_vals = [x.strip() for x in current_vals]
+
+        for val in prop_vals:
+          if val not in current_vals:
+            missing_vals.append(val)
+
+        if missing_vals:
+          validationItems.append({"config-name": prop_name,
+            "item": self.getWarnItem("If Ranger Hive Plugin is enabled."\
+            " {0} under hiveserver2-site needs to contain missing value {1}".format(prop_name, ','.join(missing_vals)))})
+      # Validations for when the Ranger Hive plugin is disabled.
+      else:
+        prop_name = 'hive.security.authorization.manager'
+        prop_val = "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"
+        if prop_name in hive_server2 and hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger Hive Plugin is disabled,"\
+                                  " {0} needs to be set to {1}".format(prop_name, prop_val))})
+        prop_name = 'hive.security.authenticator.manager'
+        prop_val = "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator"
+        if prop_name in hive_server2 and hive_server2[prop_name] != prop_val:
+          validationItems.append({"config-name": prop_name,
+                                  "item": self.getWarnItem(
+                                  "If Ranger Hive Plugin is disabled,"\
+                                  " {0} needs to be set to {1}".format(prop_name, prop_val))})
+    return self.toConfigurationValidationProblems(validationItems, "hiveserver2-site")
+
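+  # A minimal sketch (hypothetical property value) of the hive.conf.restricted.list
+  # membership check above: split, strip, and collect the required entries not present.
+  def _exampleRestrictedListCheck(self):
+    required = 'hive.security.authorization.enabled,hive.security.authorization.manager,hive.security.authenticator.manager'.split(',')
+    current = [x.strip() for x in 'hive.exec.scratchdir, hive.security.authorization.enabled'.split(',')]
+    # -> ['hive.security.authorization.manager', 'hive.security.authenticator.manager']
+    return [v for v in required if v not in current]
+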
+  def validateHBASEConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    super(HDP23StackAdvisor, self).validateHBASEConfigurations(properties, recommendedDefaults, configurations, services, hosts)
+    hbase_site = properties
+    validationItems = []
+
+    # Ranger plugin validation logic
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-hbase-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties.get('ranger-hbase-plugin-enabled', 'No') if ranger_plugin_properties else 'No'
+    prop_name = 'hbase.security.authorization'
+    prop_val = "true"
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'Yes'.lower()):
+      if hbase_site[prop_name] != prop_val:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger HBase Plugin is enabled,"\
+                                " {0} needs to be set to {1}".format(prop_name, prop_val))})
+      prop_name = "hbase.coprocessor.master.classes"
+      prop_val = "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor"
+      exclude_val = "org.apache.hadoop.hbase.security.access.AccessController"
+      if prop_name not in hbase_site or prop_val not in hbase_site[prop_name] \
+         or exclude_val in hbase_site[prop_name]:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger HBase Plugin is enabled,"\
+                                " {0} needs to contain {1} instead of {2}".format(prop_name, prop_val, exclude_val))})
+      prop_name = "hbase.coprocessor.region.classes"
+      prop_val = "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor"
+      if prop_name not in hbase_site or prop_val not in hbase_site[prop_name] \
+         or exclude_val in hbase_site[prop_name]:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger HBase Plugin is enabled,"\
+                                " {0} needs to contain {1} instead of {2}".format(prop_name, prop_val, exclude_val))})
+
+    return self.toConfigurationValidationProblems(validationItems, "hbase-site")
+
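+  # A minimal sketch (hypothetical hbase-site value): the coprocessor checks above
+  # pass only when the Ranger coprocessor is listed and the stock AccessController is not.
+  def _exampleCoprocessorCheck(self):
+    coproc = ("org.apache.hadoop.hbase.coprocessor.TokenProvider,"
+              "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor")
+    ranger = "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor"
+    legacy = "org.apache.hadoop.hbase.security.access.AccessController"
+    return ranger in coproc and legacy not in coproc  # True here: no warning emitted
+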
+  def validateKAFKAConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    kafka_broker = properties
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    # Ranger plugin validation logic
+    ranger_plugin_properties = getSiteProperties(configurations, "ranger-kafka-plugin-properties")
+    ranger_plugin_enabled = ranger_plugin_properties.get('ranger-kafka-plugin-enabled', 'No') if ranger_plugin_properties else 'No'
+    prop_name = 'authorizer.class.name'
+    prop_val = "org.apache.ranger.authorization.kafka.authorizer.RangerKafkaAuthorizer"
+    if ("RANGER" in servicesList) and (ranger_plugin_enabled.lower() == 'yes'):
+      if prop_name not in kafka_broker or kafka_broker[prop_name] != prop_val:
+        validationItems.append({"config-name": prop_name,
+                                "item": self.getWarnItem(
+                                "If Ranger Kafka Plugin is enabled,"\
+                                " {0} needs to be set to {1}".format(prop_name, prop_val))})
+
+    if 'KERBEROS' in servicesList and 'security.inter.broker.protocol' in properties:
+      interBrokerValue = properties['security.inter.broker.protocol']
+      prop_name = 'listeners'
+      prop_value = properties.get(prop_name, '')
+      if interBrokerValue and interBrokerValue not in prop_value:
+        validationItems.append({"config-name": "listeners",
+                                "item": self.getWarnItem("If Kerberos is enabled, "\
+                                "{0} needs to contain {1} as one of "\
+                                "the protocols".format(prop_name, interBrokerValue))})
+
+    return self.toConfigurationValidationProblems(validationItems, "kafka-broker")
+
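+  # A minimal sketch (hypothetical kafka-broker values): the Kerberos check above is
+  # a plain substring test of the inter-broker protocol against the listeners list.
+  def _exampleListenersCheck(self):
+    listeners = 'PLAINTEXT://localhost:6667'
+    inter_broker_protocol = 'SASL_PLAINTEXT'
+    # True here: listeners lacks a SASL_PLAINTEXT endpoint, so a WARN is emitted.
+    return inter_broker_protocol not in listeners
+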
+  def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    yarn_site = properties
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'RANGER_KMS' in servicesList and 'KERBEROS' in servicesList:
+      yarn_resource_proxy_enabled = yarn_site.get('yarn.resourcemanager.proxy-user-privileges.enabled', 'false')
+      if yarn_resource_proxy_enabled.lower() == 'true':
+        validationItems.append({"config-name": 'yarn.resourcemanager.proxy-user-privileges.enabled',
+          "item": self.getWarnItem("If Ranger KMS service is installed set yarn.resourcemanager.proxy-user-privileges.enabled " \
+          "property value as false under yarn-site"
+        )})
+
+    return self.toConfigurationValidationProblems(validationItems, "yarn-site")
+
+  def isComponentUsingCardinalityForLayout(self, componentName):
+    return componentName in ['NFS_GATEWAY', 'PHOENIX_QUERY_SERVER', 'SPARK_THRIFTSERVER']
+
+  def validateRangerAdminConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    ranger_site = properties
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if 'RANGER' in servicesList and 'policymgr_external_url' in ranger_site:
+      policymgr_mgr_url = ranger_site['policymgr_external_url']
+      if policymgr_mgr_url.endswith('/'):
+        validationItems.append({'config-name': 'policymgr_external_url',
+                               'item': self.getWarnItem('Ranger External URL should not end with a trailing slash "/"')})
+    return self.toConfigurationValidationProblems(validationItems, 'admin-properties')
+
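+  # A minimal sketch (hypothetical URL): the fix the warning above implies is
+  # simply to strip the trailing slash from the configured value.
+  def _exampleNormalizeRangerUrl(self):
+    return 'http://ranger-host:6080/'.rstrip('/')  # -> 'http://ranger-host:6080'
+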
+  def validateRangerConfigurationsEnv(self, properties, recommendedDefaults, configurations, services, hosts):
+    parentValidationProblems = super(HDP23StackAdvisor, self).validateRangerConfigurationsEnv(properties, recommendedDefaults, configurations, services, hosts)
+    ranger_env_properties = properties
+    validationItems = []
+    security_enabled = self.isSecurityEnabled(services)
+
+    if "ranger-kafka-plugin-enabled" in ranger_env_properties and ranger_env_properties["ranger-kafka-plugin-enabled"].lower() == 'yes' and not security_enabled:
+      validationItems.append({"config-name": "ranger-kafka-plugin-enabled",
+                              "item": self.getWarnItem(
+                                "Ranger Kafka plugin should not be enabled in a non-Kerberos environment.")})
+
+    validationProblems = self.toConfigurationValidationProblems(validationItems, "ranger-env")
+    validationProblems.extend(parentValidationProblems)
+    return validationProblems
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_24.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_24.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_24.py
new file mode 100755
index 0000000..813fe42
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_24.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.core.logger import Logger
+
+try:
+  from stack_advisor_23 import *
+except ImportError:
+  # The predecessor advisor could not be imported; log and continue.
+  Logger.error("stack_advisor_23 not found")
+
+class HDP24StackAdvisor(HDP23StackAdvisor):
+  pass

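This file illustrates Ambari's stack-advisor version chaining: each
stack_advisor_NN wildcard-imports its predecessor and subclasses its advisor,
overriding only what changed for that stack version. A minimal sketch of how
the next link in the chain would look (hypothetical version numbers, not part
of this commit):

    try:
      from stack_advisor_24 import *
    except ImportError:
      Logger.error("stack_advisor_24 not found")

    class HDP25StackAdvisor(HDP24StackAdvisor):
      pass  # override only what changes in this stack version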

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metrics.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metrics.json
new file mode 100755
index 0000000..b1e95fe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metrics.json
@@ -0,0 +1,7769 @@
+{
+  "NAMENODE": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/dfs/FSNamesystem/TotalLoad": {
+              "metric": "dfs.FSNamesystem.TotalLoad",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityTotal": {
+              "metric": "dfs.FSNamesystem.CapacityTotal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityUsed": {
+              "metric": "dfs.FSNamesystem.CapacityUsed",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityRemaining": {
+              "metric": "dfs.FSNamesystem.CapacityRemaining",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+              "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/BlockCapacity": {
+              "metric": "dfs.FSNamesystem.BlockCapacity",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetListingOps": {
+              "metric": "dfs.namenode.GetListingOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesAppended": {
+              "metric": "dfs.namenode.FilesAppended",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/TotalFileOps": {
+              "metric": "dfs.namenode.TotalFileOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/fsync_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/renewLease_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getFileInfo_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "unit": "MB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/complete_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setPermission_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+              "metric": "dfs.FSNamesystem.CapacityTotalGB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setOwner_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getBlockLocations_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+              "metric": "dfs.FSNamesystem.CapacityUsedGB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/AddBlockOps": {
+              "metric": "dfs.namenode.AddBlockOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesDeleted": {
+              "metric": "dfs.namenode.FilesDeleted",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/Syncs_avg_time": {
+              "metric": "dfs.namenode.SyncsAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/blockReport_avg_time": {
+              "metric": "dfs.namenode.BlockReportAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getFileInfo_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getEditLogSize_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/blockReceived_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/versionRequest_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/versionRequest_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/addBlock_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesCreated": {
+              "metric": "dfs.namenode.FilesCreated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rename_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setSafeMode_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setPermission_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesRenamed": {
+              "metric": "dfs.namenode.FilesRenamed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/register_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setReplication_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.SetReplicationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetBlockLocations": {
+              "metric": "dfs.namenode.GetBlockLocations",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/fsync_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/create_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+              "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/delete_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FileInfoOps": {
+              "metric": "dfs.namenode.FileInfoOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/DeleteFileOps": {
+              "metric": "dfs.namenode.DeleteFileOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/blockReport_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setSafeMode_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+              "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.rpc.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getEditLogSize_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesInGetListingOps": {
+              "metric": "dfs.namenode.FilesInGetListingOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/complete_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.rpc.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/Syncs_num_ops": {
+              "metric": "dfs.namenode.SyncsNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/blockReceived_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setReplication_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.SetReplicationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rollEditLog_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/FilesTotal": {
+              "metric": "dfs.FSNamesystem.FilesTotal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/ExcessBlocks": {
+              "metric": "dfs.FSNamesystem.ExcessBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/blockReport_num_ops": {
+              "metric": "dfs.namenode.BlockReportNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/SafemodeTime": {
+              "metric": "dfs.namenode.SafemodeTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/mkdirs_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "unit": "MB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+              "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/BlocksTotal": {
+              "metric": "dfs.FSNamesystem.BlocksTotal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getBlockLocations_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/Transactions_num_ops": {
+              "metric": "dfs.namenode.TransactionsNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/create_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+              "metric": "dfs.FSNamesystem.CapacityRemainingGB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/Transactions_avg_time": {
+              "metric": "dfs.namenode.TransactionsAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/MissingBlocks": {
+              "metric": "dfs.FSNamesystem.MissingBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/delete_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CorruptBlocks": {
+              "metric": "dfs.FSNamesystem.CorruptBlocks",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rename_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/blockReport_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/mkdirs_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/fsImageLoadTime": {
+              "metric": "dfs.namenode.FsImageLoadTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getListing_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rollEditLog_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/addBlock_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/setOwner_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+              "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/CreateFileOps": {
+              "metric": "dfs.namenode.CreateFileOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/register_num_ops": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/getListing_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/renewLease_avg_time": {
+              "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/ElapsedTime": {
+              "metric": "default.StartupProgress.ElapsedTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingEditsCount": {
+              "metric": "default.StartupProgress.LoadingEditsCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingEditsElapsedTime": {
+              "metric": "default.StartupProgress.LoadingEditsElapsedTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingEditsPercentComplete": {
+              "metric": "default.StartupProgress.LoadingEditsPercentComplete",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingEditsTotal": {
+              "metric": "default.StartupProgress.LoadingEditsTotal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingFsImageCount": {
+              "metric": "default.StartupProgress.LoadingFsImageCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingFsImageElapsedTime": {
+              "metric": "default.StartupProgress.LoadingFsImageElapsedTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingFsImagePercentComplete": {
+              "metric": "default.StartupProgress.LoadingFsImagePercentComplete",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/LoadingFsImageTotal": {
+              "metric": "default.StartupProgress.LoadingFsImageTotal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/PercentComplete": {
+              "metric": "default.StartupProgress.PercentComplete",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SafeModeCount": {
+              "metric": "default.StartupProgress.SafeModeCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SafeModeElapsedTime": {
+              "metric": "default.StartupProgress.SafeModeElapsedTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SafeModePercentComplete": {
+              "metric": "default.StartupProgress.SafeModePercentComplete",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SafeModeTotal": {
+              "metric": "default.StartupProgress.SafeModeTotal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SavingCheckpointCount": {
+              "metric": "default.StartupProgress.SavingCheckpointCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SavingCheckpointElapsedTime": {
+              "metric": "default.StartupProgress.SavingCheckpointElapsedTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SavingCheckpointPercentComplete": {
+              "metric": "default.StartupProgress.SavingCheckpointPercentComplete",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/default/StartupProgress/SavingCheckpointTotal": {
+              "metric": "default.StartupProgress.SavingCheckpointTotal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/ExpiredHeartbeats": {
+              "metric": "dfs.FSNamesystem.ExpiredHeartbeats",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/LastCheckpointTime": {
+              "metric": "dfs.FSNamesystem.LastCheckpointTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/LastWrittenTransactionId": {
+              "metric": "dfs.FSNamesystem.LastWrittenTransactionId",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/MillisSinceLastLoadedEdits": {
+              "metric": "dfs.FSNamesystem.MillisSinceLastLoadedEdits",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/MissingReplOneBlocks": {
+              "metric": "dfs.FSNamesystem.MissingReplOneBlocks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/PendingDataNodeMessageCount": {
+              "metric": "dfs.FSNamesystem.PendingDataNodeMessageCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/PostponedMisreplicatedBlocks": {
+              "metric": "dfs.FSNamesystem.PostponedMisreplicatedBlocks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/Snapshots": {
+              "metric": "dfs.FSNamesystem.Snapshots",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/SnapshottableDirectories": {
+              "metric": "dfs.FSNamesystem.SnapshottableDirectories",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/StaleDataNodes": {
+              "metric": "dfs.FSNamesystem.StaleDataNodes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/TotalFiles": {
+              "metric": "dfs.FSNamesystem.TotalFiles",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/TransactionsSinceLastCheckpoint": {
+              "metric": "dfs.FSNamesystem.TransactionsSinceLastCheckpoint",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/TransactionsSinceLastLogRoll": {
+              "metric": "dfs.FSNamesystem.TransactionsSinceLastLogRoll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/AllowSnapshotOps": {
+              "metric": "dfs.namenode.AllowSnapshotOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/BlockReceivedAndDeletedOps": {
+              "metric": "dfs.namenode.BlockReceivedAndDeletedOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/CacheReportAvgTime": {
+              "metric": "dfs.namenode.CacheReportAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/CacheReportNumOps": {
+              "metric": "dfs.namenode.CacheReportNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/CreateSnapshotOps": {
+              "metric": "dfs.namenode.CreateSnapshotOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/CreateSymlinkOps": {
+              "metric": "dfs.namenode.CreateSymlinkOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/DeleteSnapshotOps": {
+              "metric": "dfs.namenode.DeleteSnapshotOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/DisallowSnapshotOps": {
+              "metric": "dfs.namenode.DisallowSnapshotOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesTruncated": {
+              "metric": "dfs.namenode.FilesTruncated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetAdditionalDatanodeOps": {
+              "metric": "dfs.namenode.GetAdditionalDatanodeOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetEditAvgTime": {
+              "metric": "dfs.namenode.GetEditAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetEditNumOps": {
+              "metric": "dfs.namenode.GetEditNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetImageAvgTime": {
+              "metric": "dfs.namenode.GetImageAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetImageNumOps": {
+              "metric": "dfs.namenode.GetImageNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetLinkTargetOps": {
+              "metric": "dfs.namenode.GetLinkTargetOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/ListSnapshottableDirOps": {
+              "metric": "dfs.namenode.ListSnapshottableDirOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/PutImageAvgTime": {
+              "metric": "dfs.namenode.PutImageAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/PutImageNumOps": {
+              "metric": "dfs.namenode.PutImageNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/RenameSnapshotOps": {
+              "metric": "dfs.namenode.RenameSnapshotOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/SnapshotDiffReportOps": {
+              "metric": "dfs.namenode.SnapshotDiffReportOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/StorageBlockReportOps": {
+              "metric": "dfs.namenode.StorageBlockReportOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/TransactionsBatchedInSync": {
+              "metric": "dfs.namenode.TransactionsBatchedInSync",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountConcurrentMarkSweep": {
+              "metric": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountParNew": {
+              "metric": "jvm.JvmMetrics.GcCountParNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcNumInfoThresholdExceeded": {
+              "metric": "jvm.JvmMetrics.GcNumInfoThresholdExceeded",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcNumWarnThresholdExceeded": {
+              "metric": "jvm.JvmMetrics.GcNumWarnThresholdExceeded",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisConcurrentMarkSweep": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisParNew": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisParNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTotalExtraSleepTime": {
+              "metric": "jvm.JvmMetrics.GcTotalExtraSleepTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RetryCache/NameNodeRetryCache/CacheCleared": {
+              "metric": "rpc.RetryCache.NameNodeRetryCache.CacheCleared",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RetryCache/NameNodeRetryCache/CacheHit": {
+              "metric": "rpc.RetryCache.NameNodeRetryCache.CacheHit",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RetryCache/NameNodeRetryCache/CacheUpdated": {
+              "metric": "rpc.RetryCache.NameNodeRetryCache.CacheUpdated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetServerDefaultsAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetServerDefaultsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetServerDefaultsNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetServerDefaultsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetTransactionIdAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetTransactionIdAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetTransactionIdNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetTransactionIdNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/IOExceptionAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.IOExceptionAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/IOExceptionNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.IOExceptionNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/PathIsNotEmptyDirectoryExceptionAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.PathIsNotEmptyDirectoryExceptionAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/PathIsNotEmptyDirectoryExceptionNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.PathIsNotEmptyDirectoryExceptionNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/RecoverLeaseAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.RecoverLeaseAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/RecoverLeaseNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.RecoverLeaseNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/Rename2AvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.Rename2AvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/Rename2NumOps": {
+              "metric": "rpcdetailed.rpcdetailed.Rename2NumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/SetTimesAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.SetTimesAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/SetTimesNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.SetTimesNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/dfs/namenode/Used": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/TotalLoad": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memMaxM": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/BlockCapacity": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/TotalFiles": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/HostName": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/GetListingOps": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/UpgradeFinalized": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/fsync_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/Safemode": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/CorruptBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/LiveNodes": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/renewLease_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getFileInfo_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityRemaining": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/PercentRemaining": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
+              "unit": "MB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/complete_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getBlockLocations_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/AddBlockOps": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Syncs_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/PercentUsed": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/DecomNodes": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/blockReport_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/NonDfsUsedSpace": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/UpgradeFinalized": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getFileInfo_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getEditLogSize_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/blockReceived_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Safemode": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/FilesCreated": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/addBlock_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/DecomNodes": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityUsed": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/DeadNodes": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/PercentUsed": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Free": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Total": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/GetBlockLocations": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/fsync_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/create_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingReplicationBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/UnderReplicatedBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/FileInfoOps": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/MissingBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/blockReport_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/CapacityRemaining": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingDeletionBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getEditLogSize_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/FilesInGetListingOps": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/BlocksTotal": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityTotal": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/complete_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/LiveNodes": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/rollFsImage_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Syncs_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/StartTime": {
+              "metric": "java.lang:type=Runtime.StartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/blockReceived_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/rollEditLog_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/DeadNodes": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/FilesTotal": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.FilesTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Version": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/ExcessBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.ExcessBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/PercentRemaining": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/blockReport_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/rollFsImage_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
+              "unit": "MB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.ScheduledReplicationBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/BlocksTotal": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlocksTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getBlockLocations_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Transactions_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/create_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/CapacityTotal": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemainingGB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Transactions_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/MissingBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.MissingBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/Threads": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CorruptBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/blockReport_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/TotalFiles": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/NameDirStatuses": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getListing_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/rollEditLog_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/addBlock_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/CapacityUsed": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/CreateFileOps": {
+              "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logError": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/Version": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/getListing_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/NonDfsUsedSpace": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpcdetailed/renewLease_avg_time": {
+              "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/TotalBlocks": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+              "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityNonDFSUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisConcurrentMarkSweep": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillisConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountConcurrentMarkSweep": {
+              "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCountConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeAvgTime": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeAvgTime": {
+              "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/dfs/namenode/CorruptFiles": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.CorruptFiles",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/dfs/FSNamesystem/TotalLoad": {
+              "metric": "dfs.FSNamesystem.TotalLoad",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityTotal": {
+              "metric": "dfs.FSNamesystem.CapacityTotal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityUsed": {
+              "metric": "dfs.FSNamesystem.CapacityUsed",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityRemaining": {
+              "metric": "dfs.FSNamesystem.CapacityRemaining",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+              "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/FSNamesystem/BlockCapacity": {
+              "metric": "dfs.FSNamesystem.BlockCapacity",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/GetListingOps": {
+              "metric": "dfs.namenode.GetListingOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/FilesAppended": {
+              "metric": "dfs.namenode.FilesAppended",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/dfs/namenode/TotalFileOps": {
+              "metric": "dfs.namenode.TotalFileOps",
+              "pointInTime": true,
+              "temporal": tr

<TRUNCATED>
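
For orientation, each entry in a stack metrics.json like the one above maps an Ambari metric path (the JSON key) to the metric name exposed by the underlying source: the "ganglia"/AMS sections use dotted sink names and typically allow temporal queries, while the "jmx" sections reference MBean attributes and are point-in-time only. The following is a minimal lookup sketch in Python, assuming a local copy of the file saved as metrics.json (the file name and the looked-up path are illustrative only):

import json

with open("metrics.json") as fh:
    definitions = json.load(fh)

def resolve(path):
    # Yield every definition of `path` across components, scopes and metric sources.
    for component, scopes in definitions.items():
        for scope in ("Component", "HostComponent"):
            for source in scopes.get(scope, []):
                entry = source.get("metrics", {}).get("default", {}).get(path)
                if entry:
                    yield component, scope, source["type"], entry["metric"]

for hit in resolve("metrics/jvm/memHeapUsedM"):
    print(hit)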

[41/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/files/draining_servers.rb
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/files/draining_servers.rb b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/files/draining_servers.rb
new file mode 100755
index 0000000..a3958a6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/files/draining_servers.rb
@@ -0,0 +1,164 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add, remove, or list servers in draining mode via ZooKeeper
+
+require 'optparse'
+include Java
+
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.client.HBaseAdmin
+import org.apache.hadoop.hbase.zookeeper.ZKUtil
+import org.apache.commons.logging.Log
+import org.apache.commons.logging.LogFactory
+
+# Name of this script
+NAME = "draining_servers"
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
+  opts.separator 'Add, remove, or list servers in draining mode. Accepts either a hostname (to drain all region servers ' +
+                 'on that host), a host:port pair, or a host,port,startCode triplet. More than one server can be given, separated by spaces.'
+  opts.on('-h', '--help', 'Display usage information') do
+    puts opts
+    exit
+  end
+  options[:debug] = false
+  opts.on('-d', '--debug', 'Display extra debug logging') do
+    options[:debug] = true
+  end
+end
+optparse.parse!
+
+# Return an array of server names, where each server name is the
+# comma-delimited hostname,port,startcode triplet
+def getServers(admin)
+  serverInfos = admin.getClusterStatus().getServerInfo()
+  servers = []
+  for server in serverInfos
+    servers << server.getServerName()
+  end
+  return servers
+end
+
+def getServerNames(hostOrServers, config)
+  ret = []
+
+  for hostOrServer in hostOrServers
+    # Check whether it is already a full server name; if so, there is no need to connect to the cluster
+    parts = hostOrServer.split(',')
+    if parts.size() == 3
+      ret << hostOrServer
+    else
+      admin = HBaseAdmin.new(config) if not admin
+      servers = getServers(admin)
+
+      hostOrServer = hostOrServer.gsub(/:/, ",")
+      for server in servers
+        ret << server if server.start_with?(hostOrServer)
+      end
+    end
+  end
+
+  admin.close() if admin
+  return ret
+end
+
+def addServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.createAndFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+def removeServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.deleteNodeFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+# list servers in draining mode
+def listServers(options)
+  config = HBaseConfiguration.create()
+
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
+  servers.each {|server| puts server}
+end
+
+hostOrServers = ARGV[1..ARGV.size()]
+
+# Create a logger and disable the noisy DEBUG-level client logging
+def configureLogging(options)
+  apacheLogger = LogFactory.getLog(NAME)
+  # Configure log4j to not spew so much
+  unless (options[:debug])
+    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+    logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+  end
+  return apacheLogger
+end
+
+# Create a logger and save it to ruby global
+$LOG = configureLogging(options)
+case ARGV[0]
+  when 'add'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    addServers(options, hostOrServers)
+  when 'remove'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    removeServers(options, hostOrServers)
+  when 'list'
+    listServers(options)
+  else
+    puts optparse
+    exit 3
+end

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/files/hbaseSmokeVerify.sh
new file mode 100755
index 0000000..19276f3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+conf_dir=$1
+data=$2
+hbase_cmd=$3
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+awk '/id/,/date/' /tmp/hbase_chk_verify
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
+grep -q '1 row(s)' /tmp/hbase_chk_verify

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/functions.py
new file mode 100755
index 0000000..2bf4e9d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/functions.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g. '1000m')
+  @param xmn_percent: float (e.g. 0.2)
+  @param xmn_max: integer (e.g. 512)
+  """
+  heapsize = int(re.search(r'\d+', heapsize_str).group(0))
+  heapsize_unit = re.search(r'\D+', heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize * xmn_percent))
+  xmn_val -= xmn_val % 8
+
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
+
+def ensure_unit_for_memory(memory_size):
+  memory_size_values = re.findall(r'\d+', str(memory_size))
+  memory_size_unit = re.findall(r'\D+', str(memory_size))
+
+  if len(memory_size_values) > 0:
+    unit = 'm'
+    if len(memory_size_unit) > 0:
+      unit = memory_size_unit[0]
+    if unit not in ['b', 'k', 'm', 'g', 't', 'p']:
+      raise Exception("Memory size unit error. %s - wrong unit" % unit)
+    return "%s%s" % (memory_size_values[0], unit)
+  else:
+    raise Exception('Memory size can not be calculated')
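
To make the arithmetic above concrete, here is a small usage sketch; it assumes functions.py is importable (for example, by running the snippet from the same directory), and the expected values follow directly from the code above:

# Assumes functions.py (above) is on the Python path, e.g. the same directory.
from functions import calc_xmn_from_xms, ensure_unit_for_memory

# 20% of a 4096m heap is 819; rounded down to a multiple of 8 that is 816,
# which is then capped at the xmn_max of 512.
print(calc_xmn_from_xms('4096m', 0.2, 512))  # '512m'
# 20% of 1024m is 204.8; floored to 204, rounded down to 200, below the cap.
print(calc_xmn_from_xms('1024m', 0.2, 512))  # '200m'
print(ensure_unit_for_memory('2048'))        # '2048m' (defaults to megabytes)
print(ensure_unit_for_memory('4g'))          # '4g'    (existing unit kept)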

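A quick sanity check of the two helpers above; a minimal sketch, assuming
functions.py is importable (values are illustrative):

    from functions import calc_xmn_from_xms, ensure_unit_for_memory

    print calc_xmn_from_xms('1024m', 0.2, 512)  # '200m' (floor(1024*0.2)=204, rounded down to a multiple of 8)
    print calc_xmn_from_xms('4096m', 0.2, 512)  # '512m' (819 -> 816, then capped at xmn_max)
    print ensure_unit_for_memory('1024')        # '1024m' (unit defaults to 'm')
    print ensure_unit_for_memory('2g')          # '2g'
    ensure_unit_for_memory('512q')              # raises Exception: not a valid unit
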
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase.py
new file mode 100755
index 0000000..c5ffeb6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase.py
@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import re
+
+from resource_management import *
+import sys
+import fileinput
+import shutil
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+
+def hbase(name=None):  # one of 'master', 'regionserver' or 'client'
+  import params
+
+  #Directory( params.hbase_conf_dir_prefix,
+  #    mode=0755
+  #)
+
+  # On some operating systems this directory may not exist, so create it before placing files in it
+  Directory(params.limits_conf_dir,
+            create_parents=True,
+            owner='root',
+            group='root'
+            )
+
+  File(os.path.join(params.limits_conf_dir, 'hbase.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hbase.conf.j2")
+       )
+
+  Directory( params.hbase_conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      create_parents = True
+  )
+
+  '''Directory (params.tmp_dir,
+             owner = params.hbase_user,
+             mode=0775,
+             create_parents = True,
+             cd_access="a",
+  )
+
+  Directory (params.local_dir,
+             owner = params.hbase_user,
+             group = params.user_group,
+             mode=0775,
+             create_parents = True
+  )
+
+  Directory (os.path.join(params.local_dir, "jars"),
+             owner = params.hbase_user,
+             group = params.user_group,
+             mode=0775,
+             create_parents = True
+  )'''
+
+  parent_dir = os.path.dirname(params.tmp_dir)
+  # The path may still contain several unresolved placeholders; climb until it is clean
+  while "${" in parent_dir:
+    parent_dir = os.path.dirname(parent_dir)
+  if parent_dir != os.path.abspath(os.sep):
+    Directory (parent_dir,
+          create_parents = True,
+          cd_access="a",
+    )
+    Execute(("chmod", "1777", parent_dir), sudo=True)
+
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  XmlConfig( "core-site.xml",
+             conf_dir = params.hbase_conf_dir,
+             configurations = params.config['configurations']['core-site'],
+             configuration_attributes=params.config['configuration_attributes']['core-site'],
+             owner = params.hbase_user,
+             group = params.user_group
+  )
+
+  if 'hdfs-site' in params.config['configurations']:
+    XmlConfig( "hdfs-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+
+    #XmlConfig("hdfs-site.xml",
+    #        conf_dir=params.hadoop_conf_dir,
+    #        configurations=params.config['configurations']['hdfs-site'],
+    #        configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+    #        owner=params.hdfs_user,
+    #        group=params.user_group
+    #)
+
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-policy'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-policy'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+  else:
+    # Manually overriding ownership of the file installed by the hadoop package
+    File( format("{params.hbase_conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+
+  File(format("{hbase_conf_dir}/hbase-env.sh"),
+       owner = params.hbase_user,
+       content=InlineTemplate(params.hbase_env_sh_template)
+  )
+
+  hbase_TemplateConfig( params.metric_prop_file_name,
+    tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
+  )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{name}_jaas.conf"))
+
+  if name != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      create_parents = True
+    )
+
+    Directory (params.log_dir,
+      owner = params.hbase_user,
+      create_parents = True
+    )
+
+  if params.log4j_props is not None:
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hbase_user,
+         content=params.log4j_props
+    )
+  elif os.path.exists(format("{params.hbase_conf_dir}/log4j.properties")):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hbase_user
+    )
+  if name in ["master","regionserver"]:
+    params.HdfsResource(params.hbase_hdfs_root_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user
+    )
+    params.HdfsResource(params.hbase_staging_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.hbase_user,
+                         mode=0711
+    )
+    params.HdfsResource(params.hbase_hdfs_user_dir,
+                         type="directory",
+                          action="create_on_execute",
+                          owner=params.hbase_user,
+                          mode=params.hbase_hdfs_user_mode
+    )
+    params.HdfsResource(None, action="execute")
+
+  # create java-opts in etc/hbase/conf dir for iop.version
+  File(format("{params.hbase_conf_dir}/java-opts"),
+       mode=0644,
+       group=params.user_group,
+       owner=params.hbase_user,
+       content=params.hbase_javaopts_properties
+  )
+
+def hbase_TemplateConfig(name,
+                         tag=None
+                         ):
+  import params
+
+  TemplateConfig( format("{hbase_conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )
+
+def get_iop_version():
+  try:
+    command = 'iop-select status hadoop-client'
+    return_code, iop_output = shell.call(command, timeout=20)
+  except Exception, e:
+    Logger.error(str(e))
+    raise Fail('Unable to execute iop-select command to retrieve the version.')
+
+  if return_code != 0:
+    raise Fail(
+      'Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
+
+  iop_version = re.sub('hadoop-client - ', '', iop_output)
+  match = re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', iop_version)
+
+  if match is None:
+    raise Fail('Failed to extract a version from the iop-select output')
+
+  return iop_version

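A note on the parent-directory walk in hbase() above: hbase.tmp.dir can still
contain unresolved substitution variables, so the code climbs toward the root
until no '${' remains and only then creates and chmods the directory. A minimal
sketch of that behavior (the path is hypothetical):

    import os

    tmp_dir = '/disk1/${user.name}/hbase-tmp'
    parent_dir = os.path.dirname(tmp_dir)   # '/disk1/${user.name}'
    while "${" in parent_dir:
      parent_dir = os.path.dirname(parent_dir)
    print parent_dir                        # '/disk1' -- created, then chmod 1777
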
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_client.py
new file mode 100755
index 0000000..3ed6103
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_client.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+
+from hbase import hbase
+
+
+class HbaseClient(Script):
+
+  def get_component_name(self):
+    return "hbase-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hbase", params.version)
+      stack_select.select("hbase-client", params.version)
+      #Execute(format("stack-select set hbase-client {version}"))
+
+      # set all of the hadoop clients since hbase client is upgraded as part
+      # of the final "CLIENTS" group and we need to ensure that hadoop-client
+      # is also set
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+      #Execute(format("stack-select set hadoop-client {version}"))
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(name='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  HbaseClient().execute()

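The pre_upgrade_restart() gate above only runs the selectors when an upgrade
version is present and is at least 4.0.0.0. Roughly, assuming compare_versions()
behaves like cmp():

    from resource_management.libraries.functions.version import format_stack_version, compare_versions

    print compare_versions(format_stack_version('4.2.0.0'), '4.0.0.0')  # 1, so select() runs
    print compare_versions(format_stack_version('4.0.0.0'), '4.0.0.0')  # 0, still >= 0
    # params.version is None on a plain restart, so the whole block is skipped
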
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_decommission.py
new file mode 100755
index 0000000..fb1379a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_decommission.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  kinit_cmd = params.kinit_cmd
+
+  File(params.region_drainer,
+       content=StaticFile("draining_servers.rb"),
+       mode=0755
+  )
+
+  if params.hbase_excluded_hosts:
+    hosts = params.hbase_excluded_hosts.split(",")
+  else:  # fall back to the included hosts so that 'hosts' is always bound
+    hosts = (params.hbase_included_hosts or "").split(",")
+
+  if params.hbase_drain_only:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+
+  else:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload {host}")
+
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )

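For clarity, the commands built above expand roughly as follows (host name and
conf dir are illustrative). With mark_draining_only the host is only removed
from the draining list; a full decommission first drains and then unloads:

    # drain-only:
    #   <kinit>; hbase --config <hbase_conf_dir> org.jruby.Main draining_servers.rb remove host1.example.com
    # full decommission, per host:
    #   <kinit>; hbase --config <hbase_conf_dir> org.jruby.Main draining_servers.rb add host1.example.com
    #   <kinit>; hbase --config <hbase_conf_dir> org.jruby.Main region_mover.rb unload host1.example.com
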
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_master.py
new file mode 100755
index 0000000..b1b6cc4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_master.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+import upgrade
+
+class HbaseMaster(Script):
+
+  def get_component_name(self):
+    return "hbase-master"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(name='master')
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-master")
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+    hbase_service( 'master',
+      action = 'start'
+    )
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'master',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-master.pid")
+    check_process_status(pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hbase.security.authentication" : "kerberos",
+                           "hbase.security.authorization": "true"}
+      props_empty_check = ['hbase.master.keytab.file',
+                           'hbase.master.kerberos.principal']
+      props_read_check = ['hbase.master.keytab.file']
+      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
+                                                  props_read_check)
+
+      hbase_expectations = {}
+      hbase_expectations.update(hbase_site_expectations)
+
+      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
+                                                   {'hbase-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hbase_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hbase-site' not in security_params
+               or 'hbase.master.keytab.file' not in security_params['hbase-site']
+               or 'hbase.master.kerberos.principal' not in security_params['hbase-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hbase_user,
+                                security_params['hbase-site']['hbase.master.keytab.file'],
+                                security_params['hbase-site']['hbase.master.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_decommission(env)
+
+
+if __name__ == "__main__":
+  HbaseMaster().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_regionserver.py
new file mode 100755
index 0000000..696b173
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_regionserver.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from hbase import hbase
+from hbase_service import hbase_service
+import upgrade
+
+
+class HbaseRegionServer(Script):
+
+  def get_component_name(self):
+    return "hbase-regionserver"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(name='regionserver')
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-regionserver")
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.post_regionserver(env)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+    hbase_service( 'regionserver',
+      action = 'start'
+    )
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
+    check_process_status(pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"hbase.security.authentication" : "kerberos",
+                           "hbase.security.authorization": "true"}
+      props_empty_check = ['hbase.regionserver.keytab.file',
+                           'hbase.regionserver.kerberos.principal']
+      props_read_check = ['hbase.regionserver.keytab.file']
+      hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check,
+                                                   props_read_check)
+
+      hbase_expectations = {}
+      hbase_expectations.update(hbase_site_expectations)
+
+      security_params = get_params_from_filesystem(status_params.hbase_conf_dir,
+                                                   {'hbase-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, hbase_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hbase-site' not in security_params
+               or 'hbase.regionserver.keytab.file' not in security_params['hbase-site']
+               or 'hbase.regionserver.kerberos.principal' not in security_params['hbase-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hbase_user,
+                                security_params['hbase-site']['hbase.regionserver.keytab.file'],
+                                security_params['hbase-site']['hbase.regionserver.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+
+
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_restgatewayserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_restgatewayserver.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_restgatewayserver.py
new file mode 100755
index 0000000..f8dc53d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_restgatewayserver.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+import upgrade
+
+
+
+class HbaseRestGatewayServer(Script):
+  def get_component_name(self):
+    return "hbase-restserver"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(name='rest')
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-restserver")
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'rest',
+      action = 'start'
+    )
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'rest',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-rest.pid")
+    check_process_status(pid_file)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_decommission(env)
+
+
+if __name__ == "__main__":
+  HbaseRestGatewayServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_service.py
new file mode 100755
index 0000000..f4cc5d2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_service.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hbase_service(
+  name,
+  action = 'start'): # 'start' or 'stop' or 'status'
+
+    import params
+
+    role = name
+    cmd = format("{daemon_script} --config {hbase_conf_dir}")
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-{role}.pid")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
+
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+
+      Execute ( daemon_cmd,
+        not_if = no_op_test,
+        user = params.hbase_user
+      )
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role}")
+
+      Execute ( daemon_cmd,
+        user = params.hbase_user,
+        # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode
+        timeout = 30,
+        on_timeout = format("! ( {no_op_test} ) || {sudo} -H -E kill -9 `cat {pid_file}`"),
+      )
+
+      Execute (format("rm -f {pid_file}"))

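Concretely, for a master on a typical layout the resolved commands look
approximately like this (paths depend on the stack selector and the pid dir):

    # start (skipped when the pid file already points at a live process):
    #   /usr/iop/current/hbase-master/bin/hbase-daemon.sh --config <hbase_conf_dir> start master
    # stop (30s timeout, then forced):
    #   /usr/iop/current/hbase-master/bin/hbase-daemon.sh --config <hbase_conf_dir> stop master
    #   ! ( <no_op_test> ) || sudo -H -E kill -9 `cat <pid_dir>/hbase-hbase-master.pid`
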
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_upgrade.py
new file mode 100755
index 0000000..610f527
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/hbase_upgrade.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script import Script
+from resource_management.core.resources.system import Execute
+
+class HbaseMasterUpgrade(Script):
+
+  def snapshot(self, env):
+    import params
+
+    snap_cmd = "echo 'snapshot_all' | {0} shell".format(params.hbase_cmd)
+
+    exec_cmd = "{0} {1}".format(params.kinit_cmd, snap_cmd)
+
+    Execute(exec_cmd, user=params.hbase_user)
+
+if __name__ == "__main__":
+  HbaseMasterUpgrade().execute()

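The snapshot action simply pipes the custom snapshot_all command into the HBase
shell; on a secure cluster the kinit prefix runs first. Approximately (keytab
path and principal illustrative):

    # kinit -kt <hbase_user_keytab> <hbase_principal>; echo 'snapshot_all' | /usr/iop/current/hbase-client/bin/hbase shell
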
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/params.py
new file mode 100755
index 0000000..97657ad
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/params.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from functions import calc_xmn_from_xms, ensure_unit_for_memory
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.libraries.functions.default import default
+from resource_management import *
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.expect import expect
+import status_params
+from hbase import *
+
+def treat_value_as_mb(value1):
+  value = str(value1)
+  try:
+    part = int(value.strip()[:-1]) if value.lower().strip()[-1:] == 'm' else int(value)
+    return str(part) + 'm'
+  except ValueError:
+    return None
+
+# server configurations
+config = Script.get_config()
+exec_tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+version = default("/commandParams/version", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+component_directory = status_params.component_directory
+
+#hadoop params
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+daemon_script = format('/usr/iop/current/{component_directory}/bin/hbase-daemon.sh')
+region_mover = format('/usr/iop/current/{component_directory}/bin/region_mover.rb')
+region_drainer = format('/usr/iop/current/{component_directory}/bin/draining_servers.rb')
+hbase_cmd = format('/usr/iop/current/{component_directory}/bin/hbase')
+
+limits_conf_dir = "/etc/security/limits.d"
+hbase_conf_dir = status_params.hbase_conf_dir
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = default("/commandParams/mark_draining_only",False)
+hbase_included_hosts = config['commandParams']['included_hosts']
+
+hbase_user = status_params.hbase_user
+hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+hbase_hdfs_user_dir = format("/user/{hbase_user}")
+hbase_hdfs_user_mode = 0755
+
+# this is "hadoop-metrics.properties" for 1.x stacks
+metric_prop_file_name = "hadoop-metrics2-hbase.properties"
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+
+log_dir = config['configurations']['hbase-env']['hbase_log_dir']
+master_heapsize_cfg = config['configurations']['hbase-env']['hbase_master_heapsize']
+master_heapsize = treat_value_as_mb(master_heapsize_cfg)
+
+hbase_javaopts_properties = config['configurations']['hbase-javaopts-properties']['content']
+
+iop_full_version = get_iop_version()
+
+hbase_javaopts_properties = str(hbase_javaopts_properties)
+if hbase_javaopts_properties.find('-Diop.version') == -1:
+  hbase_javaopts_properties = hbase_javaopts_properties + ' -Diop.version=' + str(iop_full_version)
+
+regionserver_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_regionserver_heapsize'])
+regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
+regionserver_xmn_percent = expect("/configurations/hbase-env/hbase_regionserver_xmn_ratio", float) #AMBARI-15614
+regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+local_dir = config['configurations']['hbase-site']['hbase.local.dir']
+
+# TODO UPGRADE default, update site during upgrade
+#_local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
+#local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])
+
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
+ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
+
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+has_metric_collector = not len(ams_collector_hosts) == 0
+if has_metric_collector:
+  metric_collector_host = ams_collector_hosts[0]
+  metric_collector_port = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+  if metric_collector_port and metric_collector_port.find(':') != -1:
+    metric_collector_port = metric_collector_port.split(':')[1]
+  pass
+
+# if hbase is selected, hbase_rs_hosts should not be empty; still default just in case
+if 'slave_hosts' in config['clusterHostInfo']:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', default('/clusterHostInfo/slave_hosts', [])) # if hbase_rs_hosts is not given, region servers are assumed to run on the slave nodes
+else:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', default('/clusterHostInfo/all_hosts', []))
+
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
+smokeuser_permissions = "RWXCA"
+service_check_data = functions.get_unique_id_and_date()
+user_group = config['configurations']['cluster-env']["user_group"]
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  rest_server_jaas_princ = config['configurations']['hbase-site']['hbase.rest.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  rest_server_spnego_jaas_princ = config['configurations']['hbase-site']['hbase.rest.authentication.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+
+master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+rest_server_keytab_path = config['configurations']['hbase-site']['hbase.rest.keytab.file']
+rest_server_spnego_keytab_path = config['configurations']['hbase-site']['hbase.rest.authentication.kerberos.keytab']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
+kinit_path_local = functions.get_kinit_path()
+if security_enabled:
+  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
+else:
+  kinit_cmd = ""
+
+#log4j.properties
+if 'hbase-log4j' in config['configurations'] and 'content' in config['configurations']['hbase-log4j']:
+  log4j_props = config['configurations']['hbase-log4j']['content']
+else:
+  log4j_props = None
+
+hbase_env_sh_template = config['configurations']['hbase-env']['content']
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_staging_dir = "/apps/hbase/staging"
+#for create_hdfs_directory
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+kinit_path_local = functions.get_kinit_path()
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+#create partial functions with common arguments for every HdfsDirectory call
+#to create hdfs directory we need to call params.HdfsDirectory in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
+
+if stack_version != "" and compare_versions(stack_version, '4.0') >= 0:
+  command_role = default("/role", "")
+  if command_role == "HBASE_MASTER" or command_role == "HBASE_REGIONSERVER":
+    role_root = "master" if command_role == "HBASE_MASTER" else "regionserver"
+
+    daemon_script=format("/usr/iop/current/hbase-{role_root}/bin/hbase-daemon.sh")
+    region_mover = format("/usr/iop/current/hbase-{role_root}/bin/region_mover.rb")
+    region_drainer = format("/usr/iop/current/hbase-{role_root}/bin/draining_servers.rb")
+    hbase_cmd = format("/usr/iop/current/hbase-{role_root}/bin/hbase")

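treat_value_as_mb() above normalizes the configured master heapsize to an
explicit 'm' suffix; a few illustrative inputs:

    print treat_value_as_mb('1024')   # '1024m'
    print treat_value_as_mb('1024M')  # '1024m' (the suffix is compared case-insensitively)
    print treat_value_as_mb(2048)     # '2048m' (non-strings are str()-ed first)
    print treat_value_as_mb('2g')     # None   (neither a plain number nor an 'm' value)
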
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/service_check.py
new file mode 100755
index 0000000..c243c16
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/service_check.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import functions
+
+
+class HbaseServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    output_file = "/apps/hbase/data/ambarismoketest"
+    test_cmd = format("fs -test -e {output_file}")
+    smokeuser_kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};") if params.security_enabled else ""
+    hbase_servicecheck_file = format("{exec_tmp_dir}/hbase-smoke.sh")
+
+    File( format("{exec_tmp_dir}/hbaseSmokeVerify.sh"),
+      content = StaticFile("hbaseSmokeVerify.sh"),
+      mode = 0755
+    )
+
+    File( hbase_servicecheck_file,
+      mode = 0755,
+      content = Template('hbase-smoke.sh.j2')
+    )
+
+    if params.security_enabled:
+      hbase_grant_permissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh")
+      grant_privilege_cmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_permissions_file}")
+
+      File( hbase_grant_permissions_file,
+        owner   = params.hbase_user,
+        group   = params.user_group,
+        mode    = 0644,
+        content = Template('hbase_grant_permissions.j2')
+      )
+
+      Execute( grant_privilege_cmd,
+        user = params.hbase_user,
+      )
+
+    servicecheckcmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}")
+
+    Execute( servicecheckcmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+
+    Execute ( smokeverifycmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()

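Put together, the service check drives the HBase shell with the generated
hbase-smoke.sh and then re-reads the written value with hbaseSmokeVerify.sh.
The two Execute calls expand roughly to (tmp dir and conf dir illustrative):

    # <kinit>; hbase --config <hbase_conf_dir> shell /var/lib/ambari-agent/tmp/hbase-smoke.sh
    # <kinit>; /var/lib/ambari-agent/tmp/hbaseSmokeVerify.sh <hbase_conf_dir> <service_check_data> hbase
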
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/status_params.py
new file mode 100755
index 0000000..e3b7d04
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/status_params.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+# a map of the Ambari role to the component name
+# for use with /usr/iop/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'HBASE_MASTER' : 'hbase-master',
+  'HBASE_REGIONSERVER' : 'hbase-regionserver',
+  'HBASE_CLIENT' : 'hbase-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HBASE_CLIENT")
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+
+# Security related/required params
+hostname = config['hostname']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+kinit_path_local = functions.get_kinit_path()
+tmp_dir = Script.get_tmp_dir()
+
+hbase_conf_dir = format("/usr/iop/current/{component_directory}/conf")

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/upgrade.py
new file mode 100755
index 0000000..11b770d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/scripts/upgrade.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import re
+from resource_management import *
+from resource_management.core.resources.system import Execute
+from resource_management.core import shell
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.decorator import retry
+
+def prestart(env, component):
+  import params
+
+  if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+    conf_select.select(params.stack_name, "hbase", params.version)
+    stack_select.select(component, params.version)
+
+def post_regionserver(env):
+  import params
+  env.set_params(params)
+
+  check_cmd = "echo 'status \"simple\"' | {0} shell".format(params.hbase_cmd)
+
+  exec_cmd = "{0} {1}".format(params.kinit_cmd, check_cmd)
+  call_and_match(exec_cmd, params.hbase_user, params.hostname.lower() + ":", re.IGNORECASE)
+
+
+@retry(times=15, sleep_time=2, err_class=Fail)
+def call_and_match(cmd, user, regex, regex_search_flags):
+
+  code, out = shell.call(cmd, user=user)
+
+  if not (out and re.search(regex, out, regex_search_flags)):
+    raise Fail("Could not verify RS available")

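post_regionserver() above polls `status 'simple'` through the HBase shell (15
tries, 2s apart) until the local hostname shows up in the output. A minimal
sketch of the match, with an illustrative output shape:

    import re

    out = "2 live servers\n    host1.example.com:16020 1498593976 ...\n    host2.example.com:16020 ..."
    print bool(re.search("host1.example.com:", out, re.IGNORECASE))  # True -> region server verified
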
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
new file mode 100755
index 0000000..78ee5fa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -0,0 +1,109 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+{% if has_metric_collector %}
+
+*.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.slave.host.name={{hostname}}
+hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.period=10
+hbase.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+jvm.period=10
+jvm.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+rpc.period=10
+rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.sink.timeline.period=10
+hbase.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+{% else %}
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663
+
+#Ganglia following hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8663
+
+{% endif %}
+
+# Disable HBase metrics for regions/tables/regionservers by default.
+*.source.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
+hbase.*.source.filter.exclude=*Regions*
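
The template above only takes a handful of variables: has_metric_collector, hostname, the collector host/port, and the Ganglia host. A minimal sketch of previewing the rendered properties outside Ambari with the jinja2 package; all host names, the port, and the local file name below are illustrative assumptions, not part of the stack definition.

# Render a local copy of the GANGLIA-MASTER template with sample values to
# preview the properties written when a Metrics Collector is present.
from jinja2 import Template

with open("hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2") as f:
    template = Template(f.read())

print(template.render(
    has_metric_collector=True,                  # set False for the Ganglia branch
    hostname="rs1.example.com",                 # placeholder
    metric_collector_host="ams.example.com",    # placeholder
    metric_collector_port="6188",               # placeholder
    ganglia_server_host="ganglia.example.com",  # placeholder, only used when False
))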

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
new file mode 100755
index 0000000..2c01130
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -0,0 +1,107 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+{% if has_metric_collector %}
+
+*.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.slave.host.name={{hostname}}
+hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.period=10
+hbase.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+jvm.period=10
+jvm.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+rpc.period=10
+rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.sink.timeline.period=10
+hbase.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+{% else %}
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8656
+
+#Ganglia following hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8656
+{% endif %}
+
+# Disable HBase metrics for regions/tables/regionservers by default.
+*.source.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
+hbase.*.source.filter.exclude=*Regions*

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase-smoke.sh.j2
new file mode 100755
index 0000000..458da95
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase-smoke.sh.j2
@@ -0,0 +1,44 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+create 'ambarismoketest','family'
+put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
+scan 'ambarismoketest'
+exit
\ No newline at end of file
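
Once {{service_check_data}} is substituted, the file above is a plain HBase shell script. A rough sketch, not from this commit, of how a service check could render and run it; the smoke user name, paths, and check value are assumptions.

# Render the smoke script and push it through the HBase shell as the smoke
# user. Must be run as root (or a user permitted to su).
import subprocess
from jinja2 import Template

with open("hbase-smoke.sh.j2") as f:
    script = Template(f.read()).render(service_check_data="id_1498608239")

with open("/tmp/hbase-smoke.sh", "w") as f:
    f.write(script)

subprocess.run(
    ["su", "-s", "/bin/bash", "-c",
     "hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh", "ambari-qa"],
    check=True,
)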

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase.conf.j2
new file mode 100755
index 0000000..68f947f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hbase_user}}   - nofile 32000
+{{hbase_user}}   - nproc  16000
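
The two lines above raise the open-file and process limits for the HBase user once the rendered file is dropped under /etc/security/limits.d/. A small verification sketch, assuming a standard pam_limits setup and the default hbase user name; run as root.

# Check the effective nofile/nproc limits seen by the hbase user.
import subprocess

out = subprocess.run(
    ["su", "-s", "/bin/bash", "-c", "ulimit -n && ulimit -u", "hbase"],
    capture_output=True, text=True, check=True,
).stdout.split()

def as_int(v):  # 'unlimited' is also an acceptable answer
    return float("inf") if v == "unlimited" else int(v)

nofile, nproc = as_int(out[0]), as_int(out[1])
print(f"nofile={nofile} nproc={nproc}")
assert nofile >= 32000 and nproc >= 16000, "limits.d entries not applied"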

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_client_jaas.conf.j2
new file mode 100755
index 0000000..38f9721
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};
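
This client section relies on the Kerberos ticket cache rather than a keytab, so a kinit must precede any client call. A sketch, assuming the rendered file lands at /etc/hbase/conf/hbase_client_jaas.conf (the real path comes from Ambari's params), of pointing the HBase shell at it the same way the hbase-env.sh template further down in this commit does via HBASE_OPTS.

# Launch the HBase shell with the client JAAS file on a secured cluster.
# Path and config dir are assumptions; kinit must already have been run.
import os
import subprocess

env = dict(
    os.environ,
    HBASE_OPTS="-Djava.security.auth.login.config="
               "/etc/hbase/conf/hbase_client_jaas.conf",
)
subprocess.run(["hbase", "--config", "/etc/hbase/conf", "shell"], env=env)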

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_grant_permissions.j2
new file mode 100755
index 0000000..3378983
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_grant_permissions.j2
@@ -0,0 +1,39 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_master_jaas.conf.j2
new file mode 100755
index 0000000..a93c36c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_master_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{master_keytab_path}}"
+principal="{{master_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
new file mode 100755
index 0000000..7097481
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{regionserver_keytab_path}}"
+principal="{{regionserver_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_rest_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_rest_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_rest_jaas.conf.j2
new file mode 100755
index 0000000..2dc6988
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/hbase_rest_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{rest_server_keytab_path}}"
+principal="{{rest_server_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/regionservers.j2
new file mode 100755
index 0000000..fc6cc37
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/package/templates/regionservers.j2
@@ -0,0 +1,20 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in rs_hosts %}{{host}}
+{% endfor %}
\ No newline at end of file


[43/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/templates/log4j.properties.j2
new file mode 100755
index 0000000..3b34db8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/templates/log4j.properties.j2
@@ -0,0 +1,67 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Define some default values that can be overridden by system properties.
+#
+# For testing, it may also be convenient to specify
+# -Dflume.root.logger=DEBUG,console when launching flume.
+
+#flume.root.logger=DEBUG,console
+flume.root.logger=INFO,LOGFILE
+flume.log.dir={{flume_log_dir}}
+flume.log.file=flume-{{agent_name}}.log
+
+log4j.logger.org.apache.flume.lifecycle = INFO
+log4j.logger.org.jboss = WARN
+log4j.logger.org.mortbay = INFO
+log4j.logger.org.apache.avro.ipc.NettyTransceiver = WARN
+log4j.logger.org.apache.hadoop = INFO
+
+# Define the root logger to the system property "flume.root.logger".
+log4j.rootLogger=${flume.root.logger}
+
+
+# Stock log4j rolling file appender
+# Default log rotation configuration
+log4j.appender.LOGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.LOGFILE.MaxFileSize=100MB
+log4j.appender.LOGFILE.MaxBackupIndex=10
+log4j.appender.LOGFILE.File=${flume.log.dir}/${flume.log.file}
+log4j.appender.LOGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.LOGFILE.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
+
+
+# Warning: If you enable the following appender it will fill up your disk if you don't have a cleanup job!
+# This uses the updated rolling file appender from log4j-extras that supports a reliable time-based rolling policy.
+# See http://logging.apache.org/log4j/companions/extras/apidocs/org/apache/log4j/rolling/TimeBasedRollingPolicy.html
+# Add "DAILY" to flume.root.logger above if you want to use this
+log4j.appender.DAILY=org.apache.log4j.rolling.RollingFileAppender
+log4j.appender.DAILY.rollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
+log4j.appender.DAILY.rollingPolicy.ActiveFileName=${flume.log.dir}/${flume.log.file}
+log4j.appender.DAILY.rollingPolicy.FileNamePattern=${flume.log.dir}/${flume.log.file}.%d{yyyy-MM-dd}
+log4j.appender.DAILY.layout=org.apache.log4j.PatternLayout
+log4j.appender.DAILY.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
+
+
+# console
+# Add "console" to flume.root.logger above if you want to use this
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/alerts.json
new file mode 100755
index 0000000..12846ca
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/alerts.json
@@ -0,0 +1,157 @@
+{
+  "HBASE": {
+    "service": [
+      {
+        "name": "hbase_regionserver_process_percent",
+        "label": "Percent RegionServers Available",
+        "description": "This service-level alert is triggered if the configured percentage of RegionServer processes cannot be determined to be up and listening on the network for the configured warning and critical thresholds. It aggregates the results of RegionServer process down checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "hbase_regionserver_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.1
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.3
+            }
+          }
+        }
+      }
+    ],
+    "HBASE_MASTER": [
+      {
+        "name": "hbase_master_process",
+        "label": "HBase Master Process",
+        "description": "This alert is triggered if the HBase master processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hbase-site/hbase.master.port}}",
+          "default_port": 60000,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "hbase_master_cpu",
+        "label": "HBase Maser CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the HBase Master exceeds certain warning and critical thresholds. It checks the HBase Master JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hbase-site/hbase.master.info.port}}",
+            "default_port": 60010
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
+      {
+        "name": "regionservers_health_summary",
+        "label": "RegionServers Health Summary",
+        "description": "This service-level alert is triggered if there are unhealthy RegionServers",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hbase-site/hbase.master.info.port}}",
+            "default_port": 60010
+          },
+          "reporting": {
+            "ok": {
+              "text": "All {1} RegionServer(s) are alive"
+            },
+            "warning": {
+              "text": "Dead RegionServer(s): {0} out of {1}",
+              "value": 1
+            },
+            "critical": {
+              "text": "Dead RegionServer(s): {0} out of {1}",
+              "value": 1
+            },
+            "units" : "RegionServer(s)"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=HBase,name=Master,sub=Server/numDeadRegionServers",
+              "Hadoop:service=HBase,name=Master,sub=Server/numRegionServers"
+            ],
+            "value": "{0}"
+          }
+        }
+      }
+    ],
+    "HBASE_REGIONSERVER": [
+      {
+        "name": "hbase_regionserver_process",
+        "label": "HBase RegionServer Process",
+        "description": "This host-level alert is triggered if the RegionServer processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hbase-site/hbase.regionserver.info.port}}",
+          "default_port": 60030,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
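
The METRIC alerts above read JMX beans from the Master web UI (default port 60010). A sketch, with a placeholder host name, of running the same SystemCpuLoad check by hand against the /jmx servlet.

# Reproduce the hbase_master_cpu computation: SystemCpuLoad * 100,
# reported alongside the processor count (WARN at 200 / CRIT at 250
# per the alert definition above).
import requests

beans = requests.get(
    "http://master.example.com:60010/jmx",            # placeholder host
    params={"qry": "java.lang:type=OperatingSystem"},
    timeout=5,
).json()["beans"][0]

load = beans["SystemCpuLoad"] * 100
cpus = beans["AvailableProcessors"]
print(f"{cpus} CPU, load {load:.1f}")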

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-env.xml
new file mode 100755
index 0000000..47a2bb6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-env.xml
@@ -0,0 +1,175 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hbase_log_dir</name>
+    <display-name>HBase Log Dir Prefix</display-name>
+    <value>/var/log/hbase</value>
+    <description>Log Directories for HBase.</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <display-name>HBase PID Dir</display-name>
+    <value>/var/run/hbase</value>
+    <description>Pid Directory for HBase.</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase_regionserver_heapsize</name>
+    <value>2048</value>
+    <description>Maximum amount of memory each HBase RegionServer can use.</description>
+    <display-name>HBase RegionServer Maximum Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1024</minimum>
+      <maximum>6554</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_max</name>
+    <value>512</value>
+    <description> Sets the upper bound on HBase RegionServers' young generation size.
+		This value is used in case the young generation size (-Xmn) calculated based on the max heapsize (hbase_regionserver_heapsize)
+		and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds this value.
+    </description>
+    <display-name>RegionServers maximum value for -Xmn</display-name>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_ratio</name>
+    <display-name>RegionServers -Xmn in -Xmx ratio</display-name>
+    <value>0.2</value>
+    <description>Percentage of max heap size (-Xmx) which is used for young generation heap (-Xmn).</description>
+    <value-attributes>
+      <type>float</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase_master_heapsize</name>
+    <value>2048</value>
+    <description>Maximum amount of memory each HBase Master can use.</description>
+    <display-name>HBase Master Maximum Memory</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1024</minimum>
+      <maximum>16384</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+  </property>
+   <property>
+    <name>hbase_user</name>
+    <value>hbase</value>
+    <property-type>USER</property-type>
+    <description>HBase User Name.</description>
+  </property>
+
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hbase-env.sh file</description>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# If you want to configure BucketCache, specify '-XX:MaxDirectMemorySize=' with proper direct memory size
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
+{% else %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+{% endif %}
+    </value>
+  </property>
+
+</configuration>
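
The env template above references regionserver_xmn_size, which is not a property in this file; it is derived from hbase_regionserver_heapsize, hbase_regionserver_xmn_ratio and hbase_regionserver_xmn_max. A rough sketch of that derivation; the exact rounding in Ambari's helper may differ.

# Young-generation size: ratio of the RegionServer heap, capped at the
# configured maximum. All values in MB, mirroring the defaults above.
def calc_regionserver_xmn(heapsize_mb: int,
                          xmn_ratio: float = 0.2,
                          xmn_max_mb: int = 512) -> int:
    return min(xmn_max_mb, int(heapsize_mb * xmn_ratio))

print(calc_regionserver_xmn(2048))   # 409 -> -Xmn409m
print(calc_regionserver_xmn(4096))   # 512, clamped by the maximum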

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-javaopts-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-javaopts-properties.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-javaopts-properties.xml
new file mode 100755
index 0000000..3eca77a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-javaopts-properties.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>content</name>
+    <description>Hbase-javaopts-properties</description>
+    <value> </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-log4j.xml
new file mode 100755
index 0000000..2e8fb3d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-log4j.xml
@@ -0,0 +1,143 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.security.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Rolling File Appender properties
+hbase.log.maxfilesize=256MB
+hbase.log.maxbackupindex=20
+
+# Rolling File Appender
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+#
+# Security audit appender
+#
+hbase.security.log.file=SecurityAuth.audit
+hbase.security.log.maxfilesize=256MB
+hbase.security.log.maxbackupindex=20
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
+log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.category.SecurityLogger=${hbase.security.logger}
+log4j.additivity.SecurityLogger=false
+#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
+
+#
+# Null Appender
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=INFO
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+#log4j.logger.org.apache.hadoop.hbase=DEBUG
+# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
+#log4j.logger.org.apache.hadoop.dfs=DEBUG
+# Set this class to log INFO only otherwise its OTT
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+
+
+# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
+#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
+
+# Uncomment the below if you want to remove logging of client region caching'
+# and scan of .META. messages
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
+# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
+
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-policy.xml
new file mode 100755
index 0000000..b0807b6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-policy.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HRegionInterface protocol implementations (ie.
+    clients talking to HRegionServers)
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterInterface protocol implementation (ie.
+    clients talking to HMaster for admin operations).
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.masterregion.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterRegionInterface protocol implementations
+    (for HRegionServers communicating with HMaster)
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>
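
Each ACL above uses the same format: a comma-separated user list, a blank, then a comma-separated group list, with "*" meaning everyone. A small illustrative parser for that format; it is not part of the stack, just a restatement of the rule in the descriptions.

# Split an hbase-policy ACL value into its user and group lists.
def parse_acl(value):
    value = value.strip()
    if value == "*":
        return "*", "*"                      # all users and groups allowed
    parts = value.split(" ", 1)
    users = [u for u in parts[0].split(",") if u]
    groups = [g for g in parts[1].split(",") if g] if len(parts) > 1 else []
    return users, groups

print(parse_acl("alice,bob users,wheel"))    # (['alice', 'bob'], ['users', 'wheel'])
print(parse_acl("*"))                        # ('*', '*')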

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-site.xml
new file mode 100755
index 0000000..d4cca6c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,681 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://localhost:8020/apps/hbase/data</value>
+    <description>The directory shared by region servers and into
+    which HBase persists.  The URL should be 'fully-qualified'
+    to include the filesystem scheme.  For example, to specify the
+    HDFS directory '/hbase' where the HDFS instance's namenode is
+    running at namenode.example.org on port 9000, set this value to:
+    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
+    into /tmp.  Change this configuration else all data will be lost
+    on machine restart.
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false for standalone mode and true for distributed mode.  If
+      false, startup will run all HBase and ZooKeeper daemons together
+      in the one JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>60000</value>
+    <display-name>HBase Master Port</display-name>
+    <description>The port the HBase Master should bind to.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/hadoop/hbase</value>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp' (The '/tmp' directory is often cleared on
+    machine restart).
+    </description>
+    <display-name>HBase tmp directory</display-name>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.local.dir</name>
+    <value>${hbase.tmp.dir}/local</value>
+    <description>Directory on the local filesystem to be used as a local storage
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value>0.0.0.0</value>
+    <description>The bind address for the HBase Master web UI
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>60010</value>
+    <description>The port for the HBase Master web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.port</name>
+    <value>60020</value>
+    <description>The port the HBase Region Server should bind to.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>60030</value>
+    <description>The port for the HBase RegionServer web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.4</value>
+    <description>
+      Percentage of RegionServer memory to allocate to write buffers.
+      Each column family within each region is allocated a smaller
+      pool (the memstore) within this shared write pool. If this buffer is full,
+      updates are blocked and data is flushed from memstores until a global low
+      watermark (hbase.regionserver.global.memstore.size.lower.limit) is reached.
+    </description>
+    <display-name>% of RegionServer Allocated to Write Buffers</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0</minimum>
+      <maximum>0.8</maximum>
+      <increment-step>0.01</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>60</value>
+    <description>Count of RPC Listener instances spun up on RegionServers.
+    Same property is used by the Master for count of master handlers.
+    </description>
+    <display-name>Number of Handlers per RegionServer</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>5</minimum>
+      <maximum>240</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>604800000</value>
+    <description>
+      Time between major compactions. Set to 0 to disable automatic major compactions.
+    </description>
+    <display-name>Major Compaction Interval</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2592000000</maximum>
+      <unit>milliseconds</unit>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction.jitter</name>
+    <value>0.50</value>
+    <description>A multiplier applied to hbase.hregion.majorcompaction to cause compaction to occur
+      a given amount of time either side of hbase.hregion.majorcompaction. The smaller the number,
+      the closer the compactions will happen to the hbase.hregion.majorcompaction
+      interval.</description>
+  </property>
+
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.38</value>
+    <description>When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. The upstream HBase default
+      is 35% of heap; this stack sets 0.38. Setting this value equal to
+      hbase.regionserver.global.memstore.upperLimit causes the minimum possible
+      amount of flushing to occur when updates are blocked due to memstore
+      limiting.
+    </description>
+    <value-attributes>
+      <type>float</type>
+    </value-attributes>
+  </property>
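+  <!--
+    Illustrative only, not part of the stack defaults: with the values above,
+    hbase.regionserver.global.memstore.upperLimit=0.4 and lowerLimit=0.38, a
+    RegionServer with a hypothetical 16 GB heap would block updates once
+    memstores reach roughly 6.4 GB and keep flushing until they drop to
+    roughly 6.08 GB. The heap size here is an assumed example value.
+  -->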
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>4</value>
+    <description>
+    Block updates if the memstore reaches hbase.hregion.memstore.block.multiplier
+    times hbase.hregion.memstore.flush.size bytes.  Useful for preventing a
+    runaway memstore during spikes in update traffic.  Without an upper bound,
+    the memstore fills to the point where the resultant flush files take a long
+    time to compact or split, or worse, the RegionServer runs out of memory.
+    </description>
+    <display-name>Per-Column Family Memstore Block Multiplier</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>2</value>
+        </entry>
+        <entry>
+          <value>4</value>
+        </entry>
+        <entry>
+          <value>8</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value>134217728</value>
+    <description>
+      The size of an individual memstore. Each column family within each region is allocated its own memstore.
+    </description>
+    <display-name>Per-Column Family Memstore Flush Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>33554432</minimum>
+      <maximum>268435456</maximum>
+      <increment-step>1048576</increment-step>
+      <unit>B</unit>
+    </value-attributes>
+  </property>
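+  <!--
+    Illustrative only: combining the two settings above, a single region blocks
+    writes once any memstore grows past
+    hbase.hregion.memstore.block.multiplier * hbase.hregion.memstore.flush.size,
+    i.e. 4 * 134217728 bytes = 536870912 bytes (512 MB) with these defaults.
+  -->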
+  <property>
+    <name>hbase.hregion.memstore.mslab.enabled</name>
+    <value>true</value>
+    <description>
+      Enables the MemStore-Local Allocation Buffer,
+      a feature which works to prevent heap fragmentation under
+      heavy write loads. This can reduce the frequency of stop-the-world
+      GC pauses on large heaps.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>10737418240</value>
+    <description>
+      Maximum HFile size. If the sum of the sizes of a region's HFiles has grown to exceed this
+      value, the region is split in two.
+    </description>
+    <display-name>Maximum Region File Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1073741824</minimum>
+      <maximum>107374182400</maximum>
+      <unit>B</unit>
+      <increment-step>1073741824</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>100</value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values enable faster scanners but consume more memory, and some
+    calls of next may take longer when the cache is empty.
+    Do not set this value so high that the time between invocations is greater
+    than the scanner timeout, i.e. hbase.regionserver.lease.period.
+    </description>
+    <display-name>Number of Fetched Rows when Scanning from Disk</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>100</minimum>
+      <maximum>10000</maximum>
+      <increment-step>100</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>120000</value>
+    <description>ZooKeeper session timeout in milliseconds. It is used in two different ways.
+      First, this value is used in the ZK client that HBase uses to connect to the ensemble.
+      It is also used by HBase when it starts a ZK server, where it is passed as the 'maxSessionTimeout'. See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions.
+      For example, if an HBase region server connects to a ZK ensemble that is also managed by HBase, the
+      session timeout will be the one specified by this configuration. But a region server that connects
+      to an ensemble managed with a different configuration will be subject to that ensemble's maxSessionTimeout. So,
+      even though HBase might propose this value, the ensemble can have a max timeout lower than this and
+      it will take precedence.
+    </description>
+    <display-name>ZooKeeper Session Timeout</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>10000</minimum>
+      <maximum>180000</maximum>
+      <unit>milliseconds</unit>
+      <increment-step>10000</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value>10485760</value>
+    <description>Specifies the combined maximum allowed size of a KeyValue
+    instance. This sets an upper boundary for a single entry saved in a
+    storage file. Since a single KeyValue cannot be split, this helps avoid a
+    region that can no longer be split because its data is too large. It seems
+    wise to set this to a fraction of the maximum region size. Setting it to
+    zero or less disables the check.
+    </description>
+    <display-name>Maximum Record Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1048576</minimum>
+      <maximum>31457280</maximum>
+      <unit>B</unit>
+      <increment-step>262144</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.hstore.compactionThreshold</name>
+    <value>3</value>
+    <description>
+      If more than this number of StoreFiles exist in any one Store (one StoreFile
+      is written per flush of MemStore), a compaction is run to rewrite all
+      StoreFiles into a single StoreFile. Larger values delay compaction, but when
+      compaction does occur, it takes longer to complete.
+    </description>
+    <display-name>Maximum Store Files before Minor Compaction</display-name>
+    <value-attributes>
+      <type>int</type>
+      <entries>
+        <entry>
+          <value>2</value>
+        </entry>
+        <entry>
+          <value>3</value>
+        </entry>
+        <entry>
+          <value>4</value>
+        </entry>
+      </entries>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.hstore.flush.retries.number</name>
+    <value>120</value>
+    <description>
+    The number of times the region flush operation will be retried.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <display-name>hstore blocking storefiles</display-name>
+    <value>10</value>
+    <description>
+    If more than this number of StoreFiles exist in any one Store
+    (one StoreFile is written per flush of MemStore), updates are
+    blocked for this HRegion until a compaction is completed or
+    hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.40</value>
+    <description>Percentage of RegionServer memory to allocate to read buffers.</description>
+    <display-name>% of RegionServer Allocated to Read Buffers</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0</minimum>
+      <maximum>0.8</maximum>
+      <increment-step>0.01</increment-step>
+    </value-attributes>
+  </property>
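+  <!--
+    Illustrative only: hfile.block.cache.size (0.40, read buffers) plus
+    hbase.regionserver.global.memstore.upperLimit (0.4, write buffers) already
+    account for 0.8 of the RegionServer heap. HBase generally refuses to start
+    a RegionServer when the two fractions sum to more than 0.8, so raise one
+    only if you lower the other.
+  -->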
+
+  <!-- The following properties configure authentication information for
+       HBase processes when using Kerberos security.  There are no default
+       values, included here for documentation purposes -->
+  <property>
+    <name>hbase.master.keytab.file</name>
+    <value>/etc/security/keytabs/hbase.service.keytab</value>
+    <description>Full path to the Kerberos keytab file to use for logging in
+    as the configured HMaster server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.kerberos.principal</name>
+    <value>hbase/_HOST@EXAMPLE.COM</value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HMaster process.  The principal name should
+    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
+    portion, it will be replaced with the actual hostname of the running
+    instance.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.keytab.file</name>
+    <value>/etc/security/keytabs/hbase.service.keytab</value>
+    <description>Full path to the Kerberos keytab file to use for logging in
+    as the configured HRegionServer server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.kerberos.principal</name>
+    <value>hbase/_HOST@EXAMPLE.COM</value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HRegionServer process.  The principal name
+    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
+    hostname portion, it will be replaced with the actual hostname of the
+    running instance.  An entry for this principal must exist in the file
+    specified in hbase.regionserver.keytab.file
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.rest.keytab.file</name>
+    <value>/etc/security/keytabs/hbase.service.keytab</value>
+  </property>
+  <property>
+    <name>hbase.rest.kerberos.principal</name>
+    <value>hbase/_HOST@EXAMPLE.COM</value>
+  </property>
+  <property>
+    <name>hbase.rest.authentication.kerberos.keytab</name>
+    <value>/etc/security/keytabs/hbase.service.keytab</value>
+  </property>
+  <property>
+    <name>hbase.rest.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+  </property>
+
+  <!-- Additional configuration specific to HBase security -->
+  <property>
+    <name>hbase.superuser</name>
+    <value>hbase</value>
+    <description>List of users or groups (comma-separated) who are allowed
+    full privileges, regardless of stored ACLs, across the cluster.
+    Only used when HBase security is enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authentication</name>
+    <value>simple</value>
+    <description>
+      Select Simple or Kerberos authentication. Note: Kerberos must be set up before the Kerberos option will take effect.
+    </description>
+    <display-name>Enable Authentication</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <label>Simple</label>
+          <value>simple</value>
+        </entry>
+        <entry>
+          <label>Kerberos</label>
+          <value>kerberos</value>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description>Set the authorization method.</description>
+    <display-name>Enable Authorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Native</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value></value>
+    <description>A comma-separated list of coprocessors that are loaded by
+    default on all tables. For any overridden coprocessor method, these classes
+    will be called in order. After implementing your own coprocessor, just put
+    it in HBase's classpath and add the fully qualified class name here.
+    A coprocessor can also be loaded on demand by setting it on the table's
+    HTableDescriptor.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authentication</name>
+      </property>
+    </depends-on>
+  </property>
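+  <!--
+    For reference: when Kerberos is enabled, the HBASE kerberos.json added later
+    in this commit overrides this property to
+    org.apache.hadoop.hbase.security.token.TokenProvider,
+    org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,
+    org.apache.hadoop.hbase.security.access.AccessController
+    (and sets hbase.coprocessor.master.classes to AccessController).
+  -->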
+
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hbase-site</type>
+        <name>hbase.security.authorization</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+    <value-attributes>
+      <type>multiLine</type>
+    </value-attributes>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+  <property>
+    <name>hbase.zookeeper.useMulti</name>
+    <value>true</value>
+    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare replication failure scenarios (see the release note of HBASE-2611 for an example).
+    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
+    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase-unsecure</value>
+    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+      files that are configured with a relative path will go under this node.
+      By default, all of HBase's ZooKeeper file paths are configured with a
+      relative path, so they will all go under this directory unless changed.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.client.retries.number</name>
+    <value>35</value>
+    <description>Maximum retries.  Used as maximum for all retryable
+    operations such as the getting of a cell's value, starting a row update,
+    etc.  Retry interval is a rough function based on hbase.client.pause.  At
+    first we retry at this interval but then with backoff, we pretty quickly reach
+    retrying every ten seconds.  See HConstants#RETRY_BACKOFF for how the backoff
+    ramps up.  Change this setting and hbase.client.pause to suit your workload.</description>
+    <display-name>Maximum Client Retries</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>5</minimum>
+      <maximum>50</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hbase.rpc.timeout</name>
+    <value>90000</value>
+    <description>
+      This is for the RPC layer to define how long an HBase client application
+      waits for a remote call to time out. It uses pings to check connections
+      but will eventually throw a TimeoutException.
+    </description>
+    <display-name>HBase RPC Timeout</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>10000</minimum>
+      <maximum>180000</maximum>
+      <unit>milliseconds</unit>
+      <increment-step>10000</increment-step>
+    </value-attributes>
+  </property>
+
+   <property>
+    <name>hbase.hstore.compaction.max</name>
+    <value>10</value>
+    <description>The maximum number of StoreFiles which will be selected for a single minor
+      compaction, regardless of the number of eligible StoreFiles. Effectively, the value of
+      hbase.hstore.compaction.max controls the length of time it takes a single compaction to
+      complete. Setting it larger means that more StoreFiles are included in a compaction. For most
+      cases, the default value is appropriate.
+    </description>
+    <display-name>Maximum Files in a Store before Compaction</display-name>
+    <value-attributes>
+      <type>int</type>
+      <entries>
+        <entry>
+          <value>8</value>
+        </entry>
+        <entry>
+          <value>9</value>
+        </entry>
+        <entry>
+          <value>10</value>
+        </entry>
+        <entry>
+          <value>11</value>
+        </entry>
+        <entry>
+          <value>12</value>
+        </entry>
+        <entry>
+          <value>13</value>
+        </entry>
+        <entry>
+          <value>14</value>
+        </entry>
+        <entry>
+          <value>15</value>
+        </entry>
+      </entries>
+    </value-attributes>
+  </property>
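+  <!--
+    Illustrative only: with hbase.hregion.memstore.flush.size = 134217728 bytes
+    and hbase.hstore.blockingStoreFiles = 10, a single Store can accumulate
+    roughly 10 * 128 MB = 1.25 GB of un-compacted flush files before updates are
+    blocked; minor compactions triggered at hbase.hstore.compactionThreshold = 3
+    files normally keep the count well below that limit.
+  -->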
+
+  <property>
+    <name>hbase.regionserver.global.memstore.size</name>
+    <value>0.4</value>
+    <description>Percentage of RegionServer memory to allocate to write buffers.
+      Each column family within each region is allocated a smaller pool (the memstore) within this shared write pool.
+      If this buffer is full, updates are blocked and data is flushed from memstores until a global low watermark
+      (hbase.regionserver.global.memstore.size.lower.limit) is reached.
+    </description>
+    <display-name>% of RegionServer Allocated to Write Buffers</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0</minimum>
+      <maximum>0.8</maximum>
+      <increment-step>0.01</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+    <description>Disables version verification.</description>
+  </property>
+
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>Path to domain socket.</description>
+  </property>
+
+  <property>
+    <name>hbase.replication</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hbase.rest.authentication.type</name>
+    <value>simple</value>
+  </property>
+
+  <property>
+    <name>hbase.rest.port</name>
+    <value>8091</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/kerberos.json
new file mode 100755
index 0000000..b8c507c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/kerberos.json
@@ -0,0 +1,159 @@
+{
+  "services": [
+    {
+      "name": "HBASE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/hdfs"
+        },
+        {
+          "name": "hbase",
+          "principal": {
+            "value": "${hbase-env/hbase_user}@${realm}",
+            "type" : "user",
+            "configuration": "hbase-env/hbase_principal_name",
+            "local_username": "${hbase-env/hbase_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/hbase.headless.keytab",
+            "owner": {
+              "name": "${hbase-env/hbase_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "hbase-env/hbase_user_keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hbase-site": {
+            "hbase.security.authentication": "kerberos",
+            "hbase.security.authorization": "true",
+            "hbase.rest.authentication.type": "kerberos",
+            "zookeeper.znode.parent": "/hbase-secure",
+            "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController",
+            "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController",
+            "hbase.bulkload.staging.dir": "/apps/hbase/staging"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.hbase.groups": "${hadoop-env/proxyuser_group}",
+            "hadoop.proxyuser.hbase.hosts": "*"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HBASE_MASTER",
+          "identities": [
+            {
+              "name": "hbase_master_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.master.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.master.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HBASE_REGIONSERVER",
+          "identities": [
+            {
+              "name": "hbase_regionserver_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.regionserver.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.regionserver.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HBASE_REST_SERVER",
+          "identities": [
+            {
+              "name": "hbase_rest_server_hbase",
+              "principal": {
+                "value": "hbase/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.rest.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.rest.keytab.file"
+              }
+            },
+            {
+              "name": "hbase_rest_server_spnego",
+              "principal": {
+                "value": "HTTP/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hbase-site/hbase.rest.authentication.kerberos.principal",
+                "local_username": "${hbase-env/hbase_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hbase.service.keytab",
+                "owner": {
+                  "name": "${hbase-env/hbase_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hbase-site/hbase.rest.authentication.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/metainfo.xml
new file mode 100755
index 0000000..8f3fb59
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/metainfo.xml
@@ -0,0 +1,161 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <displayName>HBase</displayName>
+      <comment>Non-relational distributed database and centralized service for configuration management &amp;
+        synchronization
+      </comment>
+      <version>1.1.1</version>
+      <components>
+        <component>
+          <name>HBASE_MASTER</name>
+          <displayName>HBase Master</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <timelineAppid>HBASE</timelineAppid>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HBASE/HBASE_MASTER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hbase_master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HBASE_REGIONSERVER</name>
+          <displayName>RegionServer</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <timelineAppid>HBASE</timelineAppid>
+          <commandScript>
+            <script>scripts/hbase_regionserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HBASE_REST_SERVER</name>
+          <displayName>HBaseRESTServer</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/hbase_restgatewayserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HBASE_CLIENT</name>
+          <displayName>HBase Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/hbase_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hbase-site.xml</fileName>
+              <dictionaryName>hbase-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hbase-env.sh</fileName>
+              <dictionaryName>hbase-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>hbase-policy.xml</fileName>
+              <dictionaryName>hbase-policy</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hbase-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hbase</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>hbase-policy</config-type>
+        <config-type>hbase-site</config-type>
+        <config-type>hbase-env</config-type>
+        <config-type>hbase-log4j</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>


[44/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/status_params.py
new file mode 100755
index 0000000..d446baa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/status_params.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from ambari_commons import OSCheck
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+hbase_pid_dir = config['configurations']['ams-hbase-env']['hbase_pid_dir']
+hbase_user = ams_user
+ams_collector_pid_dir = config['configurations']['ams-env']['metrics_collector_pid_dir']
+ams_monitor_pid_dir = config['configurations']['ams-env']['metrics_monitor_pid_dir']
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+ams_hbase_conf_dir = format("{hbase_conf_dir}")
+
+kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hostname = config['hostname']
+tmp_dir = Script.get_tmp_dir()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/ams.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/ams.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/ams.conf.j2
new file mode 100755
index 0000000..c5fbc9b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/ams.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{ams_user}}   - nofile {{max_open_files_limit}}
+{{ams_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/ams_collector_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/ams_collector_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/ams_collector_jaas.conf.j2
new file mode 100755
index 0000000..f7f00eb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/ams_collector_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{ams_collector_keytab_path}}"
+principal="{{ams_collector_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/ams_zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/ams_zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/ams_zookeeper_jaas.conf.j2
new file mode 100755
index 0000000..1929548
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/ams_zookeeper_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Server {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{ams_zookeeper_keytab}}"
+principal="{{ams_zookeeper_principal_name}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hadoop-metrics2-hbase.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hadoop-metrics2-hbase.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hadoop-metrics2-hbase.properties.j2
new file mode 100755
index 0000000..a65ea88
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hadoop-metrics2-hbase.properties.j2
@@ -0,0 +1,63 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.period=30
+hbase.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+jvm.period=30
+jvm.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+rpc.period=30
+rpc.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+*.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.slave.host.name={{hostname}}
+hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.sink.timeline.period={{metrics_collection_period}}
+hbase.sink.timeline.sendInterval={{metrics_report_interval}}000
+hbase.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+hbase.sink.timeline.serviceName-prefix=ams
+
+# Switch off metrics generation on a per region basis
+*.source.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
+hbase.*.source.filter.exclude=*Regions*
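+
+# Illustrative only, not rendered by Ambari: assuming a hypothetical collector
+# host "collector.example.com", the commonly used AMS collector port 6188, and a
+# 10-second collection period, the templated sink lines above would render as,
+# for example:
+#   hbase.sink.timeline.collector=collector.example.com:6188
+#   hbase.sink.timeline.period=10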

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_client_jaas.conf.j2
new file mode 100755
index 0000000..38f9721
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_grant_permissions.j2
new file mode 100755
index 0000000..c29c674
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_grant_permissions.j2
@@ -0,0 +1,39 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
+exit
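+
+# Illustrative only: assuming a hypothetical smoke_test_user of "ambari-qa" and
+# smokeuser_permissions of "RWXCA", the template above renders the HBase shell
+# command:
+#   grant 'ambari-qa', 'RWXCA'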

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_master_jaas.conf.j2
new file mode 100755
index 0000000..a93c36c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_master_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{master_keytab_path}}"
+principal="{{master_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_regionserver_jaas.conf.j2
new file mode 100755
index 0000000..7097481
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/hbase_regionserver_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{regionserver_keytab_path}}"
+principal="{{regionserver_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/metric_groups.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/metric_groups.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/metric_groups.conf.j2
new file mode 100755
index 0000000..aa03d195
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/metric_groups.conf.j2
@@ -0,0 +1,37 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{
+   "host_metric_groups": {
+      "all": {
+         "collect_every": "10",
+         "metrics": [
+            {
+               "name": "bytes_out",
+               "value_threshold": "128"
+            }
+         ]
+      }
+   },
+   "process_metric_groups": {
+      "": {
+         "collect_every": "15",
+         "metrics": []
+      }
+   }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/metric_monitor.ini.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/metric_monitor.ini.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/metric_monitor.ini.j2
new file mode 100755
index 0000000..fc86a58
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/metric_monitor.ini.j2
@@ -0,0 +1,31 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+[default]
+debug_level = INFO
+metrics_server = {{metric_collector_host}}:{{metric_collector_port}}
+hostname = {{hostname}}
+enable_time_threshold = false
+enable_value_threshold = false
+
+[emitter]
+send_interval = {{metrics_report_interval}}
+
+[collector]
+collector_sleep_interval = 5
+max_queue_size = 5000

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/regionservers.j2
new file mode 100755
index 0000000..81d060b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/regionservers.j2
@@ -0,0 +1,20 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in rs_hosts %}{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/smoketest_metrics.json.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/smoketest_metrics.json.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/smoketest_metrics.json.j2
new file mode 100755
index 0000000..2ee0efa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/templates/smoketest_metrics.json.j2
@@ -0,0 +1,15 @@
+{
+  "metrics": [
+    {
+      "metricname": "AMBARI_METRICS.SmokeTest.FakeMetric",
+      "appid": "amssmoketestfake",
+      "hostname": "{{hostname}}",
+      "timestamp": {{current_time}},
+      "starttime": {{current_time}},
+      "metrics": {
+        "{{current_time}}": {{random1}},
+        "{{current_time + 1000}}": {{current_time}}
+      }
+    }
+  ]
+}
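
Note: the smoke test renders this template with the current timestamp and a random value, then pushes it to the Metrics Collector. A hedged sketch of an equivalent POST; the endpoint path is the one the AMS collector web app commonly serves, and the host, port, and values are illustrative only:

    import json, time, urllib2

    now = int(time.time() * 1000)
    payload = {"metrics": [{
        "metricname": "AMBARI_METRICS.SmokeTest.FakeMetric",
        "appid": "amssmoketestfake",
        "hostname": "c6401.example.com",   # illustrative host
        "timestamp": now,
        "starttime": now,
        "metrics": {str(now): 0.42, str(now + 1000): now}
    }]}

    req = urllib2.Request("http://localhost:6188/ws/v1/timeline/metrics",  # illustrative collector address
                          json.dumps(payload),
                          {"Content-Type": "application/json"})
    urllib2.urlopen(req, timeout=10)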

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/alerts.json
new file mode 100755
index 0000000..4603d22
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/alerts.json
@@ -0,0 +1,27 @@
+{
+  "FLUME": {
+    "service": [],
+    "FLUME_HANDLER": [
+      {
+        "name": "flume_agent_status",
+        "label": "Flume Agent Status",
+        "description": "This host-level alert is triggered if any of the expected flume agent processes are not available.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "FLUME/1.5.2.4.1/package/alerts/alert_flume_agent_status.py",
+          "parameters": [
+            {
+              "name": "run.directory",
+              "display_name": "Run Directory",
+              "value": "/var/run/flume",
+              "type": "STRING",
+              "description": "The directory where flume agent processes will place their PID files."
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-conf.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-conf.xml
new file mode 100755
index 0000000..582e181
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-conf.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>content</name>
+    <description>Flume agent configurations.
+    Specifying the configuration here applies to all hosts in this configuration group. Use host-specific configuration groups to provide different configurations on different hosts.
+    To configure multiple agents on the same host, provide the combined agent configurations here. Each agent must have a unique name.</description>
+    <value>
+# Flume agent config
+    </value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+</configuration>
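
Note: the "content" property above holds raw Flume properties text covering every agent on the host. For context, an illustrative single-agent example of the shape that build_flume_topology() (in flume.py later in this commit) parses; the agent, source, channel, and sink names are arbitrary:

    # Illustrative only; component names are arbitrary.
    sample_flume_conf = """
    a1.sources = r1
    a1.channels = c1
    a1.sinks = k1
    a1.sources.r1.type = netcat
    a1.sources.r1.bind = 0.0.0.0
    a1.sources.r1.port = 44444
    a1.sources.r1.channels = c1
    a1.channels.c1.type = memory
    a1.sinks.k1.type = logger
    a1.sinks.k1.channel = c1
    """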

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-env.xml
new file mode 100755
index 0000000..a448a66
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/configuration/flume-env.xml
@@ -0,0 +1,90 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>flume_conf_dir</name>
+    <display-name>Flume Conf Dir</display-name>
+    <value>/etc/flume/conf</value>
+    <description>Location to save configuration files</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>flume_log_dir</name>
+    <display-name>Flume Log Dir</display-name>
+    <value>/var/log/flume</value>
+    <description>Location to save log files</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>flume_user</name>
+    <value>flume</value>
+    <property-type>USER</property-type>
+    <description>Flume User</description>
+  </property>
+
+  <!-- flume-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for flume-env.sh file</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
+# during Flume startup.
+
+# Environment variables can be set here.
+
+export JAVA_HOME={{java_home}}
+
+# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
+# export JAVA_OPTS="-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote"
+
+# Note that the Flume conf directory is always included in the classpath.
+#FLUME_CLASSPATH=""
+
+export HIVE_HOME={{flume_hive_home}}
+export HCAT_HOME={{flume_hcat_home}}
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/metainfo.xml
new file mode 100755
index 0000000..a19eff1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/metainfo.xml
@@ -0,0 +1,69 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FLUME</name>
+      <displayName>Flume</displayName>
+      <comment>A distributed service for collecting, aggregating, and moving large amounts of streaming data into HDFS</comment>
+      <version>1.5.2</version>
+      <components>
+        <component>
+          <name>FLUME_HANDLER</name>
+          <displayName>Flume</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/flume_handler.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>flume</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/flume_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>flume-env</config-type>
+        <config-type>flume-conf</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/metrics.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/metrics.json
new file mode 100755
index 0000000..b0990ba
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/metrics.json
@@ -0,0 +1,430 @@
+{
+  "FLUME_HANDLER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/flume/flume/CHANNEL/ChannelCapacity": {
+              "metric": "ChannelCapacity",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/StartTime": {
+              "metric": "StartTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventTakeAttemptCount": {
+              "metric": "EventTakeAttemptCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventTakeSuccessCount": {
+              "metric": "EventTakeSuccessCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventPutAttemptCount": {
+              "metric": "EventPutAttemptCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/StopTime": {
+              "metric": "StopTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/ChannelFillPercentage": {
+              "metric": "ChannelFillPercentage",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/ChannelSize": {
+              "metric": "ChannelSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventPutSuccessCount": {
+              "metric": "EventPutSuccessCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/ConnectionCreatedCount": {
+              "metric": "ConnectionCreatedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/BatchCompleteCount": {
+              "metric": "BatchCompleteCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/EventDrainSuccessCount": {
+              "metric": "EventDrainSuccessCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/StartTime": {
+              "metric": "StartTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/EventDrainAttemptCount": {
+              "metric": "EventDrainAttemptCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/ConnectionFailedCount": {
+              "metric": "ConnectionFailedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/BatchUnderflowCount": {
+              "metric": "BatchUnderflowCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/ConnectionClosedCount": {
+              "metric": "ConnectionClosedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/StopTime": {
+              "metric": "StopTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/BatchEmptyCount": {
+              "metric": "BatchEmptyCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/AppendBatchReceivedCount": {
+              "metric": "AppendBatchReceivedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/AppendAcceptedCount": {
+              "metric": "AppendAcceptedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/StartTime": {
+              "metric": "StartTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/OpenConnectionCount": {
+              "metric": "OpenConnectionCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/AppendBatchAcceptedCount": {
+              "metric": "AppendBatchAcceptedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/AppendReceivedCount": {
+              "metric": "AppendReceivedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/EventReceivedCount": {
+              "metric": "EventReceivedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/StopTime": {
+              "metric": "StopTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/EventAcceptedCount": {
+              "metric": "EventAcceptedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+
+            "metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate/min": {
+              "metric": "EventTakeSuccessCount._rate._min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate/max": {
+              "metric": "EventTakeSuccessCount._rate._max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate/avg": {
+              "metric": "EventTakeSuccessCount._rate._avg",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate/sum": {
+              "metric": "EventTakeSuccessCount._rate._sum",
+              "pointInTime": false,
+              "temporal": true
+            },
+
+            "metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate/avg": {
+              "metric": "EventPutSuccessCount._rate._avg",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate/max": {
+              "metric": "EventPutSuccessCount._rate._max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate/min": {
+              "metric": "EventPutSuccessCount._rate._min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate/sum": {
+              "metric": "EventPutSuccessCount._rate._sum",
+              "pointInTime": false,
+              "temporal": true
+            },
+
+            "metrics/flume/flume/CHANNEL/ChannelSize/rate/avg": {
+              "metric": "ChannelSize._rate._avg",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/ChannelSize/rate/max": {
+              "metric": "ChannelSize._rate._max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/ChannelSize/rate/min": {
+              "metric": "ChannelSize._rate._min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/ChannelSize/rate/sum": {
+              "metric": "ChannelSize._rate._sum",
+              "pointInTime": false,
+              "temporal": true
+            }
+
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/flume/flume/CHANNEL/ChannelCapacity": {
+              "metric": "ChannelCapacity",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/StartTime": {
+              "metric": "StartTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventTakeAttemptCount": {
+              "metric": "EventTakeAttemptCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventTakeSuccessCount": {
+              "metric": "EventTakeSuccessCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventPutAttemptCount": {
+              "metric": "EventPutAttemptCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/StopTime": {
+              "metric": "StopTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/ChannelFillPercentage": {
+              "metric": "ChannelFillPercentage",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/ChannelSize": {
+              "metric": "ChannelSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventPutSuccessCount": {
+              "metric": "EventPutSuccessCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/ConnectionCreatedCount": {
+              "metric": "ConnectionCreatedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/BatchCompleteCount": {
+              "metric": "BatchCompleteCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/EventDrainSuccessCount": {
+              "metric": "EventDrainSuccessCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/StartTime": {
+              "metric": "StartTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/EventDrainAttemptCount": {
+              "metric": "EventDrainAttemptCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/ConnectionFailedCount": {
+              "metric": "ConnectionFailedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/BatchUnderflowCount": {
+              "metric": "BatchUnderflowCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/ConnectionClosedCount": {
+              "metric": "ConnectionClosedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/StopTime": {
+              "metric": "StopTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SINK/BatchEmptyCount": {
+              "metric": "BatchEmptyCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/AppendBatchReceivedCount": {
+              "metric": "AppendBatchReceivedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/AppendAcceptedCount": {
+              "metric": "AppendAcceptedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/StartTime": {
+              "metric": "StartTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/OpenConnectionCount": {
+              "metric": "OpenConnectionCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/AppendBatchAcceptedCount": {
+              "metric": "AppendBatchAcceptedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/AppendReceivedCount": {
+              "metric": "AppendReceivedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/EventReceivedCount": {
+              "metric": "EventReceivedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/StopTime": {
+              "metric": "StopTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/flume/flume/SOURCE/EventAcceptedCount": {
+              "metric": "EventAcceptedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+
+            "metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate/avg": {
+              "metric": "EventTakeSuccessCount._rate._avg",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate/max": {
+              "metric": "EventTakeSuccessCount._rate._max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate/min": {
+              "metric": "EventTakeSuccessCount._rate._min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventTakeSuccessCount/rate/sum": {
+              "metric": "EventTakeSuccessCount._rate._sum",
+              "pointInTime": false,
+              "temporal": true
+            },
+
+            "metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate/avg": {
+              "metric": "EventPutSuccessCount._rate._avg",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate/max": {
+              "metric": "EventPutSuccessCount._rate._max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate/min": {
+              "metric": "EventPutSuccessCount._rate._min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/EventPutSuccessCount/rate/sum": {
+              "metric": "EventPutSuccessCount._rate._sum",
+              "pointInTime": false,
+              "temporal": true
+            },
+
+            "metrics/flume/flume/CHANNEL/ChannelSize/rate/avg": {
+              "metric": "ChannelSize._rate._avg",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/ChannelSize/rate/max": {
+              "metric": "ChannelSize._rate._max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/ChannelSize/rate/min": {
+              "metric": "ChannelSize._rate._min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/flume/flume/CHANNEL/ChannelSize/rate/sum": {
+              "metric": "ChannelSize._rate._sum",
+              "pointInTime": false,
+              "temporal": true
+            }
+
+          }
+        }
+      }
+    ]
+  }
+}
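
Note: each entry above maps an Ambari metrics path to the underlying metric name emitted by Flume, plus flags for point-in-time and temporal (time-series) retrieval; the */rate/{avg,max,min,sum} keys map to the pre-aggregated "._rate._*" series. A small sketch of reading one definition back, assuming the file added above is on disk as metrics.json:

    import json

    with open("metrics.json") as fp:   # assumption: the file added above
        defs = json.load(fp)

    channel_size = defs["FLUME_HANDLER"]["Component"][0]["metrics"]["default"] \
                       ["metrics/flume/flume/CHANNEL/ChannelSize"]
    print("%s pointInTime=%s temporal=%s" % (
        channel_size["metric"], channel_size["pointInTime"], channel_size["temporal"]))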

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/alerts/alert_flume_agent_status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/alerts/alert_flume_agent_status.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/alerts/alert_flume_agent_status.py
new file mode 100755
index 0000000..df08bb1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/alerts/alert_flume_agent_status.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import socket
+
+from resource_management.libraries.functions.flume_agent_helper import find_expected_agent_names
+from resource_management.libraries.functions.flume_agent_helper import get_flume_status
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+FLUME_CONF_DIR_KEY = '{{flume-env/flume_conf_dir}}'
+
+FLUME_RUN_DIR_KEY = "run.directory"
+FLUME_RUN_DIR_DEFAULT = '/var/run/flume'
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (FLUME_CONF_DIR_KEY,)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  flume_conf_directory = None
+  if FLUME_CONF_DIR_KEY in configurations:
+    flume_conf_directory = configurations[FLUME_CONF_DIR_KEY]
+
+  if flume_conf_directory is None:
+    return (RESULT_CODE_UNKNOWN, ['The Flume configuration directory is a required parameter.'])
+
+  if host_name is None:
+    host_name = socket.getfqdn()
+
+  # parse script arguments
+  flume_run_directory = FLUME_RUN_DIR_DEFAULT
+  if FLUME_RUN_DIR_KEY in parameters:
+    flume_run_directory = parameters[FLUME_RUN_DIR_KEY]
+
+  processes = get_flume_status(flume_conf_directory, flume_run_directory)
+  expected_agents = find_expected_agent_names(flume_conf_directory)
+
+  alert_label = ''
+  alert_state = RESULT_CODE_OK
+
+  if len(processes) == 0 and len(expected_agents) == 0:
+    alert_label = 'No agents defined on {0}'.format(host_name)
+  else:
+    ok = []
+    critical = []
+    text_arr = []
+
+    for process in processes:
+      if not process.has_key('status') or process['status'] == 'NOT_RUNNING':
+        critical.append(process['name'])
+      else:
+        ok.append(process['name'])
+
+    if len(critical) > 0:
+      text_arr.append("{0} {1} NOT running".format(", ".join(critical),
+        "is" if len(critical) == 1 else "are"))
+
+    if len(ok) > 0:
+      text_arr.append("{0} {1} running".format(", ".join(ok),
+        "is" if len(ok) == 1 else "are"))
+
+    plural = len(critical) > 1 or len(ok) > 1
+    alert_label = "Agent{0} {1} {2}".format(
+      "s" if plural else "",
+      " and ".join(text_arr),
+      "on " + host_name)
+
+    alert_state = RESULT_CODE_CRITICAL if len(critical) > 0 else RESULT_CODE_OK
+
+  return (alert_state, [alert_label])
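
Note: a hedged usage sketch for the script above. The configuration key and the run.directory default mirror the alert definition earlier in this commit; the import assumes the module, and the Ambari agent's resource_management libraries it depends on, are importable.

    from alert_flume_agent_status import execute, get_tokens

    print(get_tokens())   # ('{{flume-env/flume_conf_dir}}',)

    state, labels = execute(
        configurations={'{{flume-env/flume_conf_dir}}': '/etc/flume/conf'},
        parameters={'run.directory': '/var/run/flume'},
        host_name='c6401.example.com')
    print("%s: %s" % (state, labels[0]))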

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume.py
new file mode 100755
index 0000000..8edba7f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume.py
@@ -0,0 +1,228 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import glob
+import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+import os
+from resource_management import *
+from resource_management.libraries.functions.flume_agent_helper import is_flume_process_live
+from resource_management.libraries.functions.flume_agent_helper import find_expected_agent_names
+from resource_management.libraries.functions.flume_agent_helper import await_flume_process_termination
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def flume(action = None):
+  import params
+
+  if action == 'config':
+    # remove previously defined meta files
+    for n in find_expected_agent_names(params.flume_conf_dir):
+      File(os.path.join(params.flume_conf_dir, n, 'ambari-meta.json'),
+        action = "delete",
+      )
+
+    Directory(params.flume_run_dir,
+    )
+
+    Directory(params.flume_conf_dir,
+              create_parents=True,
+              owner=params.flume_user,
+              )
+    Directory(params.flume_log_dir, owner=params.flume_user)
+
+    flume_agents = {}
+    if params.flume_conf_content is not None:
+      flume_agents = build_flume_topology(params.flume_conf_content)
+
+    for agent in flume_agents.keys():
+      flume_agent_conf_dir = os.path.join(params.flume_conf_dir, agent)
+      flume_agent_conf_file = os.path.join(flume_agent_conf_dir, 'flume.conf')
+      flume_agent_meta_file = os.path.join(flume_agent_conf_dir, 'ambari-meta.json')
+      flume_agent_log4j_file = os.path.join(flume_agent_conf_dir, 'log4j.properties')
+      flume_agent_env_file = os.path.join(flume_agent_conf_dir, 'flume-env.sh')
+
+      Directory(flume_agent_conf_dir,
+                owner=params.flume_user,
+                )
+
+      PropertiesFile(flume_agent_conf_file,
+        properties=flume_agents[agent],
+        owner=params.flume_user,
+        mode = 0644)
+
+      File(flume_agent_log4j_file,
+        content=Template('log4j.properties.j2', agent_name = agent),
+        owner=params.flume_user,
+        mode = 0644)
+
+      File(flume_agent_meta_file,
+        content = json.dumps(ambari_meta(agent, flume_agents[agent])),
+        owner=params.flume_user,
+        mode = 0644)
+
+      File(flume_agent_env_file,
+           owner=params.flume_user,
+           content=InlineTemplate(params.flume_env_sh_template)
+      )
+
+      if params.has_metric_collector:
+        File(os.path.join(flume_agent_conf_dir, "flume-metrics2.properties"),
+             owner=params.flume_user,
+             content=Template("flume-metrics2.properties.j2")
+        )
+
+  elif action == 'start':
+    # desired state for service should be STARTED
+    if len(params.flume_command_targets) == 0:
+      _set_desired_state('STARTED')
+
+    # It is important to run this command as a background process.
+
+    flume_base = as_user(format("{flume_bin} agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}} > {flume_log_dir}/{{4}}.out 2>&1"), params.flume_user, env={'JAVA_HOME': params.java_home}) + " &"
+
+    for agent in cmd_target_names():
+      flume_agent_conf_dir = params.flume_conf_dir + os.sep + agent
+      flume_agent_conf_file = flume_agent_conf_dir + os.sep + "flume.conf"
+      flume_agent_pid_file = params.flume_run_dir + os.sep + agent + ".pid"
+
+      if not os.path.isfile(flume_agent_conf_file):
+        continue
+
+      if not is_flume_process_live(flume_agent_pid_file):
+        # TODO someday make the ganglia ports configurable
+        extra_args = ''
+        if params.ganglia_server_host is not None:
+          extra_args = '-Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts={0}:{1}'
+          extra_args = extra_args.format(params.ganglia_server_host, '8655')
+        if params.has_metric_collector:
+          extra_args = '-Dflume.monitoring.type=org.apache.hadoop.metrics2.sink.flume.FlumeTimelineMetricsSink ' \
+                       '-Dflume.monitoring.node={0}:{1}'
+          extra_args = extra_args.format(params.metric_collector_host, params.metric_collector_port)
+
+        flume_cmd = flume_base.format(agent, flume_agent_conf_dir,
+           flume_agent_conf_file, extra_args, agent)
+
+        Execute(flume_cmd,
+          wait_for_finish=False,
+          environment={'JAVA_HOME': params.java_home}
+        )
+        # sometimes startup spawns a couple of threads - so only the first line may count
+        pid_cmd = as_sudo(('pgrep', '-o', '-u', params.flume_user, '-f', format('^{java_home}.*{agent}.*'))) + \
+        " | " + as_sudo(('tee', flume_agent_pid_file)) + "  && test ${PIPESTATUS[0]} -eq 0"
+        Execute(pid_cmd,
+                logoutput=True,
+                tries=20,
+                try_sleep=10)
+
+    pass
+  elif action == 'stop':
+    # desired state for service should be INSTALLED
+    if len(params.flume_command_targets) == 0:
+      _set_desired_state('INSTALLED')
+
+    pid_files = glob.glob(params.flume_run_dir + os.sep + "*.pid")
+
+    if 0 == len(pid_files):
+      return
+
+    agent_names = cmd_target_names()
+
+
+    for agent in agent_names:
+      pid_file = format("{flume_run_dir}/{agent}.pid")
+
+      if is_flume_process_live(pid_file):
+        pid = shell.checked_call(("cat", pid_file), sudo=True)[1].strip()
+        Execute(("kill", "-15", pid), sudo=True)    # kill command has to be a tuple
+
+      if not await_flume_process_termination(pid_file):
+        raise Fail("Can't stop flume agent: {0}".format(agent))
+
+      File(pid_file, action = 'delete')
+
+
+def ambari_meta(agent_name, agent_conf):
+  res = {}
+
+  sources = agent_conf[agent_name + '.sources'].split(' ')
+  res['sources_count'] = len(sources)
+
+  sinks = agent_conf[agent_name + '.sinks'].split(' ')
+  res['sinks_count'] = len(sinks)
+
+  channels = agent_conf[agent_name + '.channels'].split(' ')
+  res['channels_count'] = len(channels)
+
+  return res
+
+
+# build a map of dictionaries keyed by agent name, where each dictionary
+# holds that agent's property name/value pairs
+def build_flume_topology(content):
+
+  result = {}
+  agent_names = []
+
+  for line in content.split('\n'):
+    rline = line.strip()
+    if 0 != len(rline) and not rline.startswith('#'):
+      pair = rline.split('=')
+      lhs = pair[0].strip()
+      rhs = pair[1].strip()
+
+      part0 = lhs.split('.')[0]
+
+      if lhs.endswith(".sources"):
+        agent_names.append(part0)
+
+      if not result.has_key(part0):
+        result[part0] = {}
+
+      result[part0][lhs] = rhs
+
+  # trim out non-agents
+  for k in result.keys():
+    if not k in agent_names:
+      del result[k]
+
+  return result
+
+
+def cmd_target_names():
+  import params
+
+  if len(params.flume_command_targets) > 0:
+    return params.flume_command_targets
+  else:
+    return find_expected_agent_names(params.flume_conf_dir)
+
+
+def _set_desired_state(state):
+  import params
+  File(params.ambari_state_file,
+    content = state,
+  )
+
+def get_desired_state():
+  import params
+  from resource_management.core import sudo
+  if os.path.exists(params.ambari_state_file):
+    return sudo.read_file(params.ambari_state_file)
+  else:
+    return 'INSTALLED'
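
Note: a hedged demonstration of build_flume_topology() above; the import assumes the Ambari agent libraries this module needs are on the path, and the property names are arbitrary.

    from flume import build_flume_topology

    content = """
    a1.sources = r1
    a1.sources.r1.type = netcat
    orphan.key = value
    """
    print(build_flume_topology(content))
    # {'a1': {'a1.sources': 'r1', 'a1.sources.r1.type': 'netcat'}}
    # 'orphan' is dropped because it never declares a ".sources" list.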

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume_check.py
new file mode 100755
index 0000000..64cd1c1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume_check.py
@@ -0,0 +1,40 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+class FlumeServiceCheck(Script):
+
+  @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+      principal_replaced = params.http_principal.replace("_HOST", params.hostname)
+      Execute(format("{kinit_path_local} -kt {http_keytab} {principal_replaced}"),
+              user=params.smoke_user)
+
+    Execute(format('env JAVA_HOME={java_home} {flume_bin} version'),
+            logoutput=True,
+            tries = 3,
+            try_sleep = 20)
+
+if __name__ == "__main__":
+  FlumeServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume_handler.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume_handler.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume_handler.py
new file mode 100755
index 0000000..d0c6e3b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume_handler.py
@@ -0,0 +1,145 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import flume_upgrade
+
+from flume import flume
+from flume import get_desired_state
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.flume_agent_helper import find_expected_agent_names
+from resource_management.libraries.functions.flume_agent_helper import get_flume_status
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions import Direction
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+class FlumeHandler(Script):
+
+  def get_component_name(self):
+    return "flume-server"
+
+  def install(self, env):
+    import params
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    flume(action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    flume(action='stop')
+
+    if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE:
+      flume_upgrade.post_stop_backup()
+    #if rolling_restart:
+    #  flume_upgrade.post_stop_backup()
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    flume(action='config')
+
+  def status(self, env):
+    import params
+    env.set_params(params)
+    processes = get_flume_status(params.flume_conf_dir, params.flume_run_dir)
+    expected_agents = find_expected_agent_names(params.flume_conf_dir)
+
+    json = {}
+    json['processes'] = processes
+    json['alerts'] = []
+
+    alert = {}
+    alert['name'] = 'flume_agent'
+    alert['label'] = 'Flume Agent process'
+
+    if len(processes) == 0 and len(expected_agents) == 0:
+      alert['state'] = 'OK'
+
+      if not params.hostname is None:
+        alert['text'] = 'No agents defined on ' + params.hostname
+      else:
+        alert['text'] = 'No agents defined'
+
+    else:
+      crit = []
+      ok = []
+
+      for proc in processes:
+        if not proc.has_key('status') or proc['status'] == 'NOT_RUNNING':
+          crit.append(proc['name'])
+        else:
+          ok.append(proc['name'])
+
+      text_arr = []
+
+      if len(crit) > 0:
+        text_arr.append("{0} {1} NOT running".format(", ".join(crit),
+          "is" if len(crit) == 1 else "are"))
+
+      if len(ok) > 0:
+        text_arr.append("{0} {1} running".format(", ".join(ok),
+          "is" if len(ok) == 1 else "are"))
+
+      plural = len(crit) > 1 or len(ok) > 1
+      alert['text'] = "Agent{0} {1} {2}".format(
+        "s" if plural else "",
+        " and ".join(text_arr),
+        "" if params.hostname is None else "on " + str(params.hostname))
+
+      alert['state'] = 'CRITICAL' if len(crit) > 0 else 'OK'
+
+    json['alerts'].append(alert)
+    self.put_structured_out(json)
+
+    # only throw an exception if there are agents defined and there is a
+    # problem with the processes; if there are no agents defined, then
+    # the service should report STARTED (green) only if the desired state is STARTED; otherwise, INSTALLED (red)
+    if len(expected_agents) > 0:
+      for proc in processes:
+        if not proc.has_key('status') or proc['status'] == 'NOT_RUNNING':
+          raise ComponentIsNotRunning()
+    elif len(expected_agents) == 0 and 'INSTALLED' == get_desired_state():
+      raise ComponentIsNotRunning()
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    # this function should not execute if the version can't be determined or
+    # is not at least IOP 4.0.0.0
+    if not params.version or compare_versions(format_stack_version(params.version), '4.0.0.0') < 0:
+      return
+
+    Logger.info("Executing Flume Stack Upgrade pre-restart")
+    conf_select.select(params.stack_name, "flume", params.version)
+    stack_select.select("flume-server", params.version)
+    if params.upgrade_direction == Direction.UPGRADE:
+      flume_upgrade.pre_start_restore()
+
+if __name__ == "__main__":
+  FlumeHandler().execute()
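
Note: the pre_upgrade_restart() gate above only proceeds when the stack version is at least 4.0.0.0. A small sketch of the comparison helpers it relies on, assuming the resource_management functions are importable; the version strings are illustrative:

    from resource_management.libraries.functions.version import compare_versions, format_stack_version

    print(compare_versions(format_stack_version("4.1.0.0"), "4.0.0.0"))  # > 0, so the restart logic runs
    print(compare_versions(format_stack_version("2.2.0.0"), "4.0.0.0"))  # < 0, so the method returns early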

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume_upgrade.py
new file mode 100755
index 0000000..181668b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/flume_upgrade.py
@@ -0,0 +1,94 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import shutil
+import tarfile
+import tempfile
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import tar_archive
+
+BACKUP_TEMP_DIR = "flume-upgrade-backup"
+BACKUP_CONF_DIR_ARCHIVE = "flume-conf-backup.tar"
+
+def post_stop_backup():
+  """
+  Backs up the flume config, config dir, file/spillable channels as part of the
+  upgrade process.
+  :return:
+  """
+  Logger.info('Backing up Flume data and configuration before upgrade...')
+  directoryMappings = _get_directory_mappings()
+
+  absolute_backup_dir = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
+  if not os.path.isdir(absolute_backup_dir):
+    os.makedirs(absolute_backup_dir)
+
+  for directory in directoryMappings:
+    if not os.path.isdir(directory):
+      raise Fail("Unable to backup missing directory {0}".format(directory))
+
+    archive = os.path.join(absolute_backup_dir, directoryMappings[directory])
+    Logger.info('Compressing {0} to {1}'.format(directory, archive))
+
+    if os.path.exists(archive):
+      os.remove(archive)
+
+    # backup the directory, following symlinks instead of including them
+    tar_archive.archive_directory_dereference(archive, directory)
+
+
+def pre_start_restore():
+  """
+  Restores the flume config, config dir, file/spillable channels to their proper locations
+  after an upgrade has completed.
+  :return:
+  """
+  Logger.info('Restoring Flume data and configuration after upgrade...')
+  directoryMappings = _get_directory_mappings()
+
+  for directory in directoryMappings:
+    archive = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR,
+      directoryMappings[directory])
+
+    if os.path.isfile(archive):
+      Logger.info('Extracting {0} to {1}'.format(archive, directory))
+      tarball = None
+      try:
+        tarball = tarfile.open(archive, "r")
+        tarball.extractall(directory)
+      finally:
+        if tarball:
+          tarball.close()
+
+    # cleanup
+    if os.path.exists(os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)):
+      shutil.rmtree(os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR))
+
+def _get_directory_mappings():
+  """
+  Gets a dictionary of directory to archive name that represents the
+  directories that need to be backed up and their output tarball archive targets
+  :return:  the dictionary of directory to tarball mappings
+  """
+  import params
+
+  return { params.flume_conf_dir : BACKUP_CONF_DIR_ARCHIVE}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/params.py
new file mode 100755
index 0000000..f9addb9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/params.py
@@ -0,0 +1,101 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.script.script import Script
+
+from params_linux import *
+
+config = Script.get_config()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+upgrade_direction = default("/commandParams/upgrade_direction", Direction.UPGRADE)
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+user_group = config['configurations']['cluster-env']['user_group']
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+
+security_enabled = False
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+# hadoop default parameters
+flume_bin = '/usr/iop/current/flume-server/bin/flume-ng'
+
+# hadoop parameters for 2.2+
+flume_hive_home = '/usr/iop/current/hive-metastore'
+flume_hcat_home = '/usr/iop/current/hive-webhcat'
+
+# flume agent runtime for management
+flume_log_dir = config['configurations']['flume-env']['flume_log_dir']
+
+# flume_run_dir is a new parameter added since Ambari 2.1 in IOP 4.1
+# The default value must be the same as the value of "flume_run_dir" in params.py of Ambari 1.7 in IOP 4.0
+# See ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/params.py
+FLUME_RUN_DIR_DEFAULT = '/var/run/flume'
+if (('flume-env' in config['configurations']) and ('flume_run_dir' in config['configurations']['flume-env'])):
+  flume_run_dir = config['configurations']['flume-env']['flume_run_dir']
+else:
+  flume_run_dir = FLUME_RUN_DIR_DEFAULT
+
+java_home = config['hostLevelParams']['java_home']
+ambari_state_file = format("{flume_run_dir}/ambari-state.txt")
+
+if 'flume-env' in config['configurations'] and 'flume_user' in config['configurations']['flume-env']:
+  flume_user = config['configurations']['flume-env']['flume_user']
+
+if (('flume-conf' in config['configurations']) and('content' in config['configurations']['flume-conf'])):
+  flume_conf_content = config['configurations']['flume-conf']['content']
+else:
+  flume_conf_content = None
+
+if (('flume-log4j' in config['configurations']) and ('content' in config['configurations']['flume-log4j'])):
+  flume_log4j_content = config['configurations']['flume-log4j']['content']
+else:
+  flume_log4j_content = None
+
+targets = default('/commandParams/flume_handler', None)
+flume_command_targets = [] if targets is None else targets.split(',')
+
+flume_env_sh_template = config['configurations']['flume-env']['content']
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', [])
+ganglia_server_host = None
+if 0 != len(ganglia_server_hosts):
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hostname = None
+if config.has_key('hostname'):
+  hostname = config['hostname']
+
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+has_metric_collector = not len(ams_collector_hosts) == 0
+if has_metric_collector:
+  metric_collector_host = ams_collector_hosts[0]
+  metric_collector_port = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+  if metric_collector_port and metric_collector_port.find(':') != -1:
+    metric_collector_port = metric_collector_port.split(':')[1]
+  pass

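params.py above resolves flume_run_dir with a fallback when the flume-env key is absent, and keeps only the port from the metrics collector's host:port address. A minimal sketch of that same lookup-and-split pattern, using a plain dictionary as a hypothetical stand-in for the object returned by Script.get_config():

# Hypothetical config dictionary; the real Script.get_config() object exposes
# the same 'configurations' layout.
config = {
    'configurations': {
        'flume-env': {'flume_run_dir': '/var/run/flume'},
        'ams-site': {'timeline.metrics.service.webapp.address': '0.0.0.0:6188'},
    }
}

FLUME_RUN_DIR_DEFAULT = '/var/run/flume'
flume_run_dir = config['configurations'].get('flume-env', {}).get('flume_run_dir', FLUME_RUN_DIR_DEFAULT)

# Keep only the port when the collector address is given as host:port.
address = config['configurations']['ams-site']['timeline.metrics.service.webapp.address']
metric_collector_port = address.split(':')[1] if ':' in address else address

print("%s %s" % (flume_run_dir, metric_collector_port))  # /var/run/flume 6188

The real script reaches the same result with explicit "in" checks; .get() with a default is the equivalent idiom.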
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/params_linux.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/params_linux.py
new file mode 100755
index 0000000..7ecbe27
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/scripts/params_linux.py
@@ -0,0 +1,30 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.functions import format
+from resource_management.libraries.script.script import Script
+
+# server configurations
+config = Script.get_config()
+
+flume_conf_dir = '/usr/iop/current/flume-server/conf'
+
+flume_user = 'flume'
+flume_group = 'flume'
+if 'flume-env' in config['configurations'] and 'flume_user' in config['configurations']['flume-env']:
+  flume_user = config['configurations']['flume-env']['flume_user']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/templates/flume-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/templates/flume-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/templates/flume-metrics2.properties.j2
new file mode 100755
index 0000000..bc18043
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/templates/flume-metrics2.properties.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+collector={{metric_collector_host}}
+port={{metric_collector_port}}
+collectionFrequency=60000
+maxRowCacheSize=10000
+sendInterval=59000
+
+# Metric names having type COUNTER
+counters=EventTakeSuccessCount,EventPutSuccessCount,EventTakeAttemptCount,EventPutAttemptCount

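The two placeholders above are filled in from params.py when the agent writes flume-metrics2.properties. A rough illustration of the substitution, rendered here with the standard jinja2 package and made-up host/port values rather than Ambari's own template machinery:

from jinja2 import Template

snippet = Template("collector={{metric_collector_host}}\nport={{metric_collector_port}}\n")
# Hypothetical values; the real ones come from params.metric_collector_host / metric_collector_port.
print(snippet.render(metric_collector_host="ams-collector.example.com",
                     metric_collector_port="6188"))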
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/templates/flume.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/templates/flume.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/templates/flume.conf.j2
new file mode 100755
index 0000000..4dee67f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/FLUME/package/templates/flume.conf.j2
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+# flume.conf: Add your flume configuration here and start flume
+#             Note if you are using the Windows service or Unix service
+#             provided by the HDP distribution, they will assume the
+#             agent's name in this file to be 'a1'
+#
+{{flume_agent_conf_content}}


[36/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/namenode.py
new file mode 100755
index 0000000..46bd926
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/namenode.py
@@ -0,0 +1,319 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import os
+import json
+import tempfile
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from resource_management.libraries.functions.version import compare_versions, \
+  format_stack_version
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions import get_klist_path
+
+import namenode_upgrade
+from hdfs_namenode import namenode, wait_for_safemode_off
+from hdfs import hdfs
+import hdfs_rebalance
+from utils import failover_namenode, get_dfsadmin_base_command
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+
+# hashlib is supplied as of Python 2.5 as the replacement interface for md5
+# and other secure hashes.  In 2.6, md5 is deprecated.  Import hashlib if
+# available, avoiding a deprecation warning under 2.6.  Import md5 otherwise,
+# preserving 2.4 compatibility.
+try:
+  import hashlib
+  _md5 = hashlib.md5
+except ImportError:
+  import md5
+  _md5 = md5.new
+
+class NameNode(Script):
+
+  def get_component_name(self):
+    return "hadoop-hdfs-namenode"
+
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    #TODO we need this for HA because of manual steps
+    self.configure(env)
+
+  def prepare_rolling_upgrade(self, env):
+    namenode_upgrade.prepare_rolling_upgrade()
+
+  def wait_for_safemode_off(self, env):
+    wait_for_safemode_off(30, True)
+
+  def finalize_non_rolling_upgrade(self, env):
+    namenode_upgrade.finalize_upgrade("nonrolling")
+
+  def finalize_rolling_upgrade(self, env):
+    namenode_upgrade.finalize_upgrade("rolling")
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-hdfs-namenode", params.version)
+      #Execute(format("stack-select set hadoop-hdfs-namenode {version}"))
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    namenode(action="start", upgrade_type=upgrade_type, env=env)
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+
+    dfsadmin_base_command = get_dfsadmin_base_command('hdfs')
+    dfsadmin_cmd = dfsadmin_base_command + " -report -live"
+    Execute(dfsadmin_cmd,
+            user=params.hdfs_user,
+            tries=60,
+            try_sleep=10
+    )
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if upgrade_type == "rolling" and params.dfs_ha_enabled:
+      if params.dfs_ha_automatic_failover_enabled:
+        failover_namenode()
+      else:
+        raise Fail("Rolling Upgrade - dfs.ha.automatic-failover.enabled must be enabled to perform a rolling restart")
+
+    namenode(action="stop", upgrade_type=upgrade_type, env=env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    hdfs()
+    namenode(action="configure", env=env)
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.namenode_pid_file)
+    pass
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+    props_value_check = None
+    props_empty_check = ['dfs.namenode.kerberos.internal.spnego.principal',
+                         'dfs.namenode.keytab.file',
+                         'dfs.namenode.kerberos.principal']
+    props_read_check = ['dfs.namenode.keytab.file']
+    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
+                                                props_read_check)
+
+    hdfs_expectations = {}
+    hdfs_expectations.update(core_site_expectations)
+    hdfs_expectations.update(hdfs_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                 {'core-site.xml': FILE_TYPE_XML,
+                                                  'hdfs-site.xml': FILE_TYPE_XML})
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'hdfs-site' not in security_params
+               or 'dfs.namenode.keytab.file' not in security_params['hdfs-site']
+               or 'dfs.namenode.kerberos.principal' not in security_params['hdfs-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hdfs_user,
+                                security_params['hdfs-site']['dfs.namenode.keytab.file'],
+                                security_params['hdfs-site']['dfs.namenode.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="decommission")
+    pass
+
+
+  def rebalancehdfs(self, env):
+    import params
+    env.set_params(params)
+
+    name_node_parameters = json.loads( params.name_node_params )
+    threshold = name_node_parameters['threshold']
+    _print("Starting balancer with threshold = %s\n" % threshold)
+
+    rebalance_env = {'PATH': params.hadoop_bin_dir}
+
+    if params.security_enabled:
+      # Create the kerberos credentials cache (ccache) file and set it in the environment to use
+      # when executing HDFS rebalance command. Use the md5 hash of the combination of the principal and keytab file
+      # to generate a (relatively) unique cache filename so that we can use it as needed.
+      # TODO: params.tmp_dir=/var/lib/ambari-agent/data/tmp. However hdfs user doesn't have access to this path.
+      # TODO: Hence using /tmp
+      ccache_file_name = "hdfs_rebalance_cc_" + _md5(format("{hdfs_principal_name}|{hdfs_user_keytab}")).hexdigest()
+      ccache_file_path = os.path.join(tempfile.gettempdir(), ccache_file_name)
+      rebalance_env['KRB5CCNAME'] = ccache_file_path
+
+      # If there are no tickets in the cache or they are expired, perform a kinit, else use what
+      # is in the cache
+      klist_cmd = format("{klist_path_local} -s {ccache_file_path}")
+      kinit_cmd = format("{kinit_path_local} -c {ccache_file_path} -kt {hdfs_user_keytab} {hdfs_principal_name}")
+      if os.system(klist_cmd) != 0:
+        Execute(kinit_cmd, user=params.hdfs_user)
+
+    def calculateCompletePercent(first, current):
+      return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove
+
+
+    def startRebalancingProcess(threshold, rebalance_env):
+      rebalanceCommand = format('hdfs --config {hadoop_conf_dir} balancer -threshold {threshold}')
+      return as_user(rebalanceCommand, params.hdfs_user, env=rebalance_env)
+
+    command = startRebalancingProcess(threshold, rebalance_env)
+
+    basedir = os.path.join(env.config.basedir, 'scripts')
+    if(threshold == 'DEBUG'): #FIXME TODO remove this on PROD
+      basedir = os.path.join(env.config.basedir, 'scripts', 'balancer-emulator')
+      command = ['python','hdfs-command.py']
+
+    _print("Executing command %s\n" % command)
+
+    parser = hdfs_rebalance.HdfsParser()
+
+    def handle_new_line(line, is_stderr):
+      if is_stderr:
+        return
+
+      _print('[balancer] %s' % (line))
+      pl = parser.parseLine(line)
+      if pl:
+        res = pl.toJson()
+        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl)
+
+        self.put_structured_out(res)
+      elif parser.state == 'PROCESS_FINISHED' :
+        _print('[balancer] %s' % ('Process is finished' ))
+        self.put_structured_out({'completePercent' : 1})
+        return
+
+    Execute(command,
+            on_new_line = handle_new_line,
+            logoutput = False,
+    )
+
+    if params.security_enabled and os.path.exists(ccache_file_path):
+      # Delete the kerberos credentials cache (ccache) file
+      os.remove(ccache_file_path)
+
+  def prepare_express_upgrade(self, env):
+    """
+    During an Express Upgrade.
+    If in HA, on the Active NameNode only, examine the directory dfs.namenode.name.dir and
+    make sure that there is no "/previous" directory.
+
+    Create a list of all the DataNodes in the cluster.
+    hdfs dfsadmin -report > dfs-old-report-1.log
+
+    hdfs dfsadmin -safemode enter
+    hdfs dfsadmin -saveNamespace
+
+    Copy the checkpoint files located in ${dfs.namenode.name.dir}/current into a backup directory.
+
+    Finalize any prior HDFS upgrade,
+    hdfs dfsadmin -finalizeUpgrade
+
+    Prepare for a NameNode rolling upgrade in order to not lose any data.
+    hdfs dfsadmin -rollingUpgrade prepare
+    """
+    import params
+    Logger.info("Preparing the NameNodes for a NonRolling (aka Express) Upgrade.")
+
+    if params.security_enabled:
+      kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}")
+      Execute(kinit_command, user=params.hdfs_user, logoutput=True)
+
+    namenode_upgrade.prepare_upgrade_check_for_previous_dir()
+    namenode_upgrade.prepare_upgrade_enter_safe_mode()
+    namenode_upgrade.prepare_upgrade_save_namespace()
+    namenode_upgrade.prepare_upgrade_backup_namenode_dir()
+    namenode_upgrade.prepare_upgrade_finalize_previous_upgrades()
+
+    # Call -rollingUpgrade prepare
+    namenode_upgrade.prepare_rolling_upgrade()
+
+def _print(line):
+  sys.stdout.write(line)
+  sys.stdout.flush()
+
+if __name__ == "__main__":
+  NameNode().execute()

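rebalancehdfs() above streams the balancer's stdout through handle_new_line and reports progress as 1 - bytesLeftToMove / initial bytesLeftToMove. A stripped-down sketch of just that progress arithmetic; the sample line format and the tiny parser are assumptions standing in for hdfs_rebalance.HdfsParser:

import re

# Assumed shape of a balancer progress line; the real parser lives in hdfs_rebalance.py.
LEFT_TO_MOVE = re.compile(r'([\d.]+) GB bytes left to move', re.IGNORECASE)

def bytes_left(line):
    m = LEFT_TO_MOVE.search(line)
    return float(m.group(1)) if m else None

def complete_percent(first_left, current_left):
    # Same formula as calculateCompletePercent() above.
    return 1.0 - current_left / first_left

initial = bytes_left("... 120.5 GB bytes left to move ...")
current = bytes_left("... 30.1 GB bytes left to move ...")
print(round(complete_percent(initial, current), 3))  # 0.75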
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/namenode_ha_state.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/namenode_ha_state.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/namenode_ha_state.py
new file mode 100755
index 0000000..a5893b6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/namenode_ha_state.py
@@ -0,0 +1,205 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.default import default
+from utils import get_value_from_jmx
+
+
+class NAMENODE_STATE:
+  ACTIVE = "active"
+  STANDBY = "standby"
+  UNKNOWN = "unknown"
+
+
+class NamenodeHAState:
+  """
+  Represents the current state of the Namenode Hosts in High Availability Mode
+  """
+
+  def __init__(self):
+    """
+    Initializes all fields by querying the Namenode state.
+    Raises a ValueError if unable to construct the object.
+    """
+    import params
+
+    self.name_service = default("/configurations/hdfs-site/dfs.nameservices", None)
+    if not self.name_service:
+      raise ValueError("Could not retrieve property dfs.nameservices")
+
+    nn_unique_ids_key = "dfs.ha.namenodes." + str(self.name_service)
+    # List of the nn unique ids
+    self.nn_unique_ids = default("/configurations/hdfs-site/" + nn_unique_ids_key, None)
+    if not self.nn_unique_ids:
+      raise ValueError("Could not retrieve property " + nn_unique_ids_key)
+
+    self.nn_unique_ids = self.nn_unique_ids.split(",")
+    self.nn_unique_ids = [x.strip() for x in self.nn_unique_ids]
+
+    policy = default("/configurations/hdfs-site/dfs.http.policy", "HTTP_ONLY")
+    self.encrypted = policy.upper() == "HTTPS_ONLY"
+
+    jmx_uri_fragment = ("https" if self.encrypted else "http") + "://{0}/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"
+    namenode_http_fragment = "dfs.namenode.http-address.{0}.{1}"
+    namenode_https_fragment = "dfs.namenode.https-address.{0}.{1}"
+
+    # Dictionary where the key is the Namenode State (e.g., ACTIVE), and the value is a set of hostnames
+    self.namenode_state_to_hostnames = {}
+
+    # Dictionary from nn unique id name to a tuple of (http address, https address)
+    self.nn_unique_id_to_addresses = {}
+    for nn_unique_id in self.nn_unique_ids:
+      http_key = namenode_http_fragment.format(self.name_service, nn_unique_id)
+      https_key = namenode_https_fragment.format(self.name_service, nn_unique_id)
+
+      http_value = default("/configurations/hdfs-site/" + http_key, None)
+      https_value = default("/configurations/hdfs-site/" + https_key, None)
+      actual_value = https_value if self.encrypted else http_value
+      hostname = actual_value.split(":")[0].strip() if actual_value and ":" in actual_value else None
+
+      self.nn_unique_id_to_addresses[nn_unique_id] = (http_value, https_value)
+      try:
+        if not hostname:
+          raise Exception("Could not retrieve hostname from address " + actual_value)
+
+        jmx_uri = jmx_uri_fragment.format(actual_value)
+        state = get_value_from_jmx(jmx_uri, "State")
+
+        if not state:
+          raise Exception("Could not retrieve Namenode state from URL " + jmx_uri)
+
+        state = state.lower()
+
+        if state not in [NAMENODE_STATE.ACTIVE, NAMENODE_STATE.STANDBY]:
+          state = NAMENODE_STATE.UNKNOWN
+
+        if state in self.namenode_state_to_hostnames:
+          self.namenode_state_to_hostnames[state].add(hostname)
+        else:
+          hostnames = set([hostname, ])
+          self.namenode_state_to_hostnames[state] = hostnames
+      except:
+        Logger.error("Could not get namenode state for " + nn_unique_id)
+
+  def __str__(self):
+    return "Namenode HA State: {\n" + \
+           ("IDs: %s\n"       % ", ".join(self.nn_unique_ids)) + \
+           ("Addresses: %s\n" % str(self.nn_unique_id_to_addresses)) + \
+           ("States: %s\n"    % str(self.namenode_state_to_hostnames)) + \
+           ("Encrypted: %s\n" % str(self.encrypted)) + \
+           ("Healthy: %s\n"   % str(self.is_healthy())) + \
+           "}"
+
+  def is_encrypted(self):
+    """
+    :return: Returns a bool indicating if HTTPS is enabled
+    """
+    return self.encrypted
+
+  def get_nn_unique_ids(self):
+    """
+    :return Returns a list of the nn unique ids
+    """
+    return self.nn_unique_ids
+
+  def get_nn_unique_id_to_addresses(self):
+    """
+    :return Returns a dictionary where the key is the nn unique id, and the value is a tuple of (http address, https address)
+    Each address is of the form, hostname:port
+    """
+    return self.nn_unique_id_to_addresses
+
+  def get_address_for_nn_id(self, id):
+    """
+    :param id: Namenode ID
+    :return: Returns the appropriate address (HTTP if no encryption, HTTPS otherwise) for the given namenode id.
+    """
+    if id in self.nn_unique_id_to_addresses:
+      addresses = self.nn_unique_id_to_addresses[id]
+      if addresses and len(addresses) == 2:
+        return addresses[1] if self.encrypted else addresses[0]
+    return None
+
+  def get_address_for_host(self, hostname):
+    """
+    :param hostname: Host name
+    :return: Returns the appropriate address (HTTP if no encryption, HTTPS otherwise) for the given host.
+    """
+    for id, addresses in self.nn_unique_id_to_addresses.iteritems():
+      if addresses and len(addresses) == 2:
+        if ":" in addresses[0]:
+          nn_hostname = addresses[0].split(":")[0].strip()
+          if nn_hostname == hostname:
+            # Found the host
+            return addresses[1] if self.encrypted else addresses[0]
+    return None
+
+  def get_namenode_state_to_hostnames(self):
+    """
+    :return Return a dictionary where the key is a member of NAMENODE_STATE, and the value is a set of hostnames.
+    """
+    return self.namenode_state_to_hostnames
+
+  def get_address(self, namenode_state):
+    """
+    @param namenode_state: Member of NAMENODE_STATE
+    :return Get the address that corresponds to the first host with the given state
+    """
+    hosts = self.namenode_state_to_hostnames[namenode_state] if namenode_state in self.namenode_state_to_hostnames else []
+    if hosts and len(hosts) > 0:
+      hostname = list(hosts)[0]
+      return self.get_address_for_host(hostname)
+    return None
+
+  def is_active(self, host_name):
+    """
+    :param host_name: Host name
+    :return: Return True if this is the active NameNode, otherwise, False.
+    """
+    return self._is_in_state(host_name, NAMENODE_STATE.ACTIVE)
+
+  def is_standby(self, host_name):
+    """
+    :param host_name: Host name
+    :return: Return True if this is the standby NameNode, otherwise, False.
+    """
+    return self._is_in_state(host_name, NAMENODE_STATE.STANDBY)
+
+  def _is_in_state(self, host_name, state):
+    """
+    :param host_name: Host name
+    :param state: State to check
+    :return: Return True if this NameNode is in the specified state, otherwise, False.
+    """
+    mapping = self.get_namenode_state_to_hostnames()
+    if state in mapping:
+      hosts_in_state = mapping[state]
+      if hosts_in_state is not None and len(hosts_in_state) == 1 and next(iter(hosts_in_state)).lower() == host_name.lower():
+        return True
+    return False
+
+  def is_healthy(self):
+    """
+    :return: Returns a bool indicating if exactly one ACTIVE and one STANDBY host exist.
+    """
+    active_hosts = self.namenode_state_to_hostnames[NAMENODE_STATE.ACTIVE] if NAMENODE_STATE.ACTIVE in self.namenode_state_to_hostnames else []
+    standby_hosts = self.namenode_state_to_hostnames[NAMENODE_STATE.STANDBY] if NAMENODE_STATE.STANDBY in self.namenode_state_to_hostnames else []
+    return len(active_hosts) == 1 and len(standby_hosts) == 1
\ No newline at end of file

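namenode.py and namenode_upgrade.py consume this class along the following lines. This is only a usage sketch: the constructor needs a full Ambari command context (it reads hdfs-site through default() and queries each NameNode's JMX servlet), and the hostname shown is made up:

from namenode_ha_state import NamenodeHAState, NAMENODE_STATE

ha_state = NamenodeHAState()
if not ha_state.is_healthy():
    # Exactly one ACTIVE and one STANDBY NameNode are expected.
    raise Exception("Unhealthy NameNode HA state:\n%s" % ha_state)

if ha_state.is_active("nn1.example.com"):
    # Only the active NameNode performs the Express Upgrade preparation steps.
    print("Active NameNode address: %s" % ha_state.get_address(NAMENODE_STATE.ACTIVE))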
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/namenode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/namenode_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/namenode_upgrade.py
new file mode 100755
index 0000000..5969fcf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/namenode_upgrade.py
@@ -0,0 +1,262 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import re
+import os
+
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
+from resource_management.core import shell
+from resource_management.core.shell import call
+from resource_management.core.shell import as_user
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions import Direction, SafeMode
+from resource_management.libraries.functions import get_unique_id_and_date
+from utils import get_dfsadmin_base_command
+from namenode_ha_state import NamenodeHAState
+
+safemode_to_instruction = {SafeMode.ON: "enter",
+                           SafeMode.OFF: "leave"}
+
+def prepare_upgrade_check_for_previous_dir():
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires backing up some data.
+  Check that there is no "previous" folder inside the NameNode Name Dir.
+  """
+  import params
+
+  if params.dfs_ha_enabled:
+    namenode_ha = NamenodeHAState()
+    if namenode_ha.is_active(params.hostname):
+      Logger.info("NameNode High Availability is enabled and this is the Active NameNode.")
+
+      problematic_previous_namenode_dirs = set()
+      nn_name_dirs = params.dfs_name_dir.split(',')
+      for nn_dir in nn_name_dirs:
+        if os.path.isdir(nn_dir):
+          # Check for a previous folder, which is not allowed.
+          previous_dir = os.path.join(nn_dir, "previous")
+          if os.path.isdir(previous_dir):
+            problematic_previous_namenode_dirs.add(previous_dir)
+
+      if len(problematic_previous_namenode_dirs) > 0:
+        message = 'WARNING. The following NameNode Name Dir(s) have a "previous" folder from an older version.\n' \
+                  'Please back it up first, and then delete it, OR Finalize (E.g., "hdfs dfsadmin -finalizeUpgrade").\n' \
+                  'NameNode Name Dir(s): {0}\n' \
+                  '***** Then, retry this step. *****'.format(", ".join(problematic_previous_namenode_dirs))
+        Logger.error(message)
+        raise Fail(message)
+
+def prepare_upgrade_enter_safe_mode():
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires first entering Safemode.
+  """
+  import params
+
+  dfsadmin_base_command = get_dfsadmin_base_command('hdfs')
+  safe_mode_enter_cmd = dfsadmin_base_command + " -safemode enter"
+  try:
+    # Safe to call if already in Safe Mode
+    desired_state = SafeMode.ON
+    safemode_transition_successful, original_state = reach_safemode_state(params.hdfs_user, desired_state, params.dfs_ha_enabled)
+    Logger.info("Transition successful: {0}, original state: {1}".format(str(safemode_transition_successful), str(original_state)))
+    if not safemode_transition_successful:
+      raise Fail("Could not transition to safemode state %s. Please check logs to make sure namenode is up." % str(desired_state))
+  except Exception, e:
+    message = "Could not enter safemode. Error: {0}. As the HDFS user, call this command: {1}".format(str(e), safe_mode_enter_cmd)
+    Logger.error(message)
+    raise Fail(message)
+
+def prepare_upgrade_save_namespace():
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires saving the namespace.
+  """
+  import params
+
+  dfsadmin_base_command = get_dfsadmin_base_command('hdfs')
+  save_namespace_cmd = dfsadmin_base_command + " -saveNamespace"
+  try:
+    Logger.info("Checkpoint the current namespace.")
+    as_user(save_namespace_cmd, params.hdfs_user, env={'PATH': params.hadoop_bin_dir})
+  except Exception, e:
+    message = format("Could not save the NameSpace. As the HDFS user, call this command: {save_namespace_cmd}")
+    Logger.error(message)
+    raise Fail(message)
+
+def prepare_upgrade_backup_namenode_dir():
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires backing up the NameNode Name Dirs.
+  """
+  import params
+
+  i = 0
+  failed_paths = []
+  nn_name_dirs = params.dfs_name_dir.split(',')
+  backup_destination_root_dir = "/tmp/upgrades/{0}".format(params.stack_version_unformatted)
+  if len(nn_name_dirs) > 0:
+    Logger.info("Backup the NameNode name directory's CURRENT folder.")
+  for nn_dir in nn_name_dirs:
+    i += 1
+    namenode_current_image = os.path.join(nn_dir, "current")
+    unique = get_unique_id_and_date() + "_" + str(i)
+    # Note that /tmp may not be writeable.
+    backup_current_folder = "{0}/namenode_{1}/".format(backup_destination_root_dir, unique)
+
+    if os.path.isdir(namenode_current_image) and not os.path.isdir(backup_current_folder):
+      try:
+        os.makedirs(backup_current_folder)
+        Execute(('cp', '-ar', namenode_current_image, backup_current_folder),
+                sudo=True
+        )
+      except Exception, e:
+        failed_paths.append(namenode_current_image)
+  if len(failed_paths) > 0:
+    Logger.error("Could not backup the NameNode Name Dir(s) to {0}, make sure that the destination path is "
+                 "writeable and copy the directories on your own. Directories: {1}".format(backup_destination_root_dir,
+                                                                                           ", ".join(failed_paths)))
+
+def prepare_upgrade_finalize_previous_upgrades():
+  """
+  During a NonRolling (aka Express Upgrade), preparing the NameNode requires Finalizing any upgrades that are in progress.
+  """
+  import params
+
+  dfsadmin_base_command = get_dfsadmin_base_command('hdfs')
+  finalize_command = dfsadmin_base_command + " -rollingUpgrade finalize"
+  try:
+    Logger.info("Attempt to Finalize if there are any in-progress upgrades. "
+                "This will return 255 if no upgrades are in progress.")
+    code, out = shell.checked_call(finalize_command, logoutput=True, user=params.hdfs_user)
+    if out:
+      expected_substring = "there is no rolling upgrade in progress"
+      if expected_substring not in out.lower():
+        Logger.warning('Finalize command did not contain substring: %s' % expected_substring)
+    else:
+      Logger.warning("Finalize command did not return any output.")
+  except Exception, e:
+    Logger.warning("Ensure no upgrades are in progress.")
+
+
+def reach_safemode_state(user, safemode_state, in_ha):
+  """
+  Enter or leave safemode for the Namenode.
+  @param user: user to perform action as
+  @param safemode_state: Desired state of ON or OFF
+  @param in_ha: bool indicating if Namenode High Availability is enabled
+  :return: Returns a tuple of (transition success, original state). If no change is needed, the indicator of
+  success will be True
+  """
+  Logger.info("Prepare to transition into safemode state %s" % safemode_state)
+  import params
+  original_state = SafeMode.UNKNOWN
+
+  hostname = params.hostname
+  safemode_check = format("su - {user} -c 'hdfs dfsadmin -safemode get'")
+
+  grep_pattern = format("Safe mode is {safemode_state} in {hostname}") if in_ha else format("Safe mode is {safemode_state}")
+  safemode_check_with_grep = format("su - {user} -c 'hdfs dfsadmin -safemode get | grep \"{grep_pattern}\"'")
+  code, out = call(safemode_check)
+  Logger.info("Command: %s\nCode: %d." % (safemode_check, code))
+  if code == 0 and out is not None:
+    Logger.info(out)
+    re_pattern = r"Safe mode is (\S*) in " + hostname.replace(".", "\\.") if in_ha else r"Safe mode is (\S*)"
+    m = re.search(re_pattern, out, re.IGNORECASE)
+    if m and len(m.groups()) >= 1:
+      original_state = m.group(1).upper()
+
+      if original_state == safemode_state:
+        return (True, original_state)
+      else:
+        # Make a transition
+        command = "hdfs dfsadmin -safemode %s" % (safemode_to_instruction[safemode_state])
+        Execute(command,
+                user=user,
+                logoutput=True,
+                path=[params.hadoop_bin_dir])
+
+        code, out = call(safemode_check_with_grep)
+        Logger.info("Command: %s\nCode: %d. Out: %s" % (safemode_check_with_grep, code, out))
+        if code == 0:
+          return (True, original_state)
+  return (False, original_state)
+
+
+def prepare_rolling_upgrade():
+  """
+  Perform either an upgrade or a downgrade.
+
+  Rolling Upgrade for HDFS Namenode requires the following.
+  0. Namenode must be up
+  1. Leave safemode if the safemode status is not OFF
+  2. Execute a rolling upgrade "prepare"
+  3. Execute a rolling upgrade "query"
+  """
+  import params
+
+  if not params.upgrade_direction or params.upgrade_direction not in [Direction.UPGRADE, Direction.DOWNGRADE]:
+    raise Fail("Could not retrieve upgrade direction: %s" % str(params.upgrade_direction))
+  Logger.info(format("Performing a(n) {params.upgrade_direction} of HDFS"))
+
+  if params.security_enabled:
+    Execute(format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}"))
+
+
+  if params.upgrade_direction == Direction.UPGRADE:
+    if params.dfs_ha_enabled:
+      Logger.info('High Availability is enabled, must leave safemode before calling "-rollingUpgrade prepare"')
+      desired_state = SafeMode.OFF
+      safemode_transition_successful, original_state = reach_safemode_state(params.hdfs_user, SafeMode.OFF, True)
+      if not safemode_transition_successful:
+        raise Fail("Could not transition to safemode state %s. Please check logs to make sure namenode is up." % str(SafeMode.OFF))
+
+    prepare = "hdfs dfsadmin -rollingUpgrade prepare"
+    query = "hdfs dfsadmin -rollingUpgrade query"
+    Execute(prepare,
+            user=params.hdfs_user,
+            logoutput=True)
+    Execute(query,
+            user=params.hdfs_user,
+            logoutput=True)
+  elif params.upgrade_direction == Direction.DOWNGRADE:
+    pass
+
+def finalize_upgrade(upgrade_type):
+  """
+  Finalize the Namenode upgrade, at which point it cannot be downgraded.
+  :param upgrade_type rolling or nonrolling
+  """
+  Logger.info("Executing Rolling Upgrade finalize")
+  import params
+
+  if params.security_enabled:
+    Execute(format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}"))
+
+  finalize_cmd = "hdfs dfsadmin -rollingUpgrade finalize"
+  query_cmd = "hdfs dfsadmin -rollingUpgrade query"
+
+  Execute(query_cmd,
+        user=params.hdfs_user,
+        logoutput=True)
+  Execute(finalize_cmd,
+          user=params.hdfs_user,
+          logoutput=True)
+  Execute(query_cmd,
+          user=params.hdfs_user,
+          logoutput=True)

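reach_safemode_state() above decides whether a transition is needed by regex-matching the output of "hdfs dfsadmin -safemode get". A self-contained sketch of just that parsing step; the sample output strings are assumptions about the dfsadmin format:

import re

def parse_safemode(output, hostname=None, in_ha=False):
    """Return 'ON', 'OFF' or 'UNKNOWN' from `hdfs dfsadmin -safemode get` output."""
    pattern = (r"Safe mode is (\S*) in " + re.escape(hostname)) if in_ha else r"Safe mode is (\S*)"
    m = re.search(pattern, output, re.IGNORECASE)
    return m.group(1).upper() if m else "UNKNOWN"

print(parse_safemode("Safe mode is OFF"))                                  # OFF
print(parse_safemode("Safe mode is ON in nn1.example.com/10.0.0.1:8020",
                     hostname="nn1.example.com", in_ha=True))              # ON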
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/nfsgateway.py
new file mode 100755
index 0000000..ff4778a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/nfsgateway.py
@@ -0,0 +1,138 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from hdfs_nfsgateway import nfsgateway
+from hdfs import hdfs
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+
+
+class NFSGateway(Script):
+
+  def get_component_name(self):
+    return "hadoop-hdfs-nfs3"
+
+  def install(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.install_packages(env)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if Script.is_stack_greater_or_equal('4.1.0.0'):
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-hdfs-nfs3", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    self.configure(env)
+    nfsgateway(action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    nfsgateway(action="stop")
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    hdfs()
+    nfsgateway(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.nfsgateway_pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+    props_value_check = None
+    props_empty_check = ['nfs.keytab.file',
+                         'nfs.kerberos.principal']
+    props_read_check = ['nfs.keytab.file']
+    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
+                                                props_read_check)
+
+    hdfs_expectations = {}
+    hdfs_expectations.update(core_site_expectations)
+    hdfs_expectations.update(hdfs_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                 {'core-site.xml': FILE_TYPE_XML,
+                                                  'hdfs-site.xml': FILE_TYPE_XML})
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ('hdfs-site' not in security_params or
+                'nfs.keytab.file' not in security_params['hdfs-site'] or
+                'nfs.kerberos.principal' not in security_params['hdfs-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hdfs_user,
+                                security_params['hdfs-site']['nfs.keytab.file'],
+                                security_params['hdfs-site'][
+                                  'nfs.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+if __name__ == "__main__":
+  NFSGateway().execute()

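security_status() above (and the near-identical version in namenode.py) boils down to: require the kerberos values in core-site, require non-empty keytab/principal keys in hdfs-site, then kinit with them. A tiny hand-rolled sketch of that expectation check, independent of the resource_management security_commons helpers; the hdfs-site values shown are hypothetical:

def check_expectations(site_config, must_equal=None, must_be_non_empty=None):
    """Return a list of human-readable problems with one *-site config dict."""
    issues = []
    for key, expected in (must_equal or {}).items():
        if site_config.get(key) != expected:
            issues.append("%s should be %r" % (key, expected))
    for key in (must_be_non_empty or []):
        if not site_config.get(key):
            issues.append("%s must be set" % key)
    return issues

hdfs_site = {'nfs.keytab.file': '/etc/security/keytabs/nfs.service.keytab',
             'nfs.kerberos.principal': 'nfs/_HOST@EXAMPLE.COM'}
print(check_expectations(hdfs_site,
                         must_be_non_empty=['nfs.keytab.file', 'nfs.kerberos.principal']))  # []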
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/params.py
new file mode 100755
index 0000000..1f76f80
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/params.py
@@ -0,0 +1,326 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.libraries.functions.default import default
+from resource_management import *
+import status_params
+import utils
+import os
+import itertools
+import re
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = status_params.hdfs_user
+root_user = "root"
+hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
+
+# Some datanode settings
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
+secure_dn_ports_are_in_use = False
+
+#hadoop params
+mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_home = "/usr/iop/current/hadoop-client"
+if not security_enabled:
+  hadoop_secure_dn_user = '""'
+else:
+  dfs_dn_port = utils.get_port(dfs_dn_addr)
+  dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
+  dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
+  # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
+  if dfs_http_policy == "HTTPS_ONLY":
+    secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
+  elif dfs_http_policy == "HTTP_AND_HTTPS":
+    secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
+  else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+    secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
+  if secure_dn_ports_are_in_use:
+    hadoop_secure_dn_user = hdfs_user
+  else:
+    hadoop_secure_dn_user = '""'
+
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+limits_conf_dir = "/etc/security/limits.d"
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+
+#snappy
+create_lib_snappy_symlinks = False
+snappy_so = "libsnappy.so"
+so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
+so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
+so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
+so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
+so_src_dir_x86 = format("{hadoop_home}/lib")
+so_src_dir_x64 = format("{hadoop_home}/lib/native")
+so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
+so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
+
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
+ulimit_cmd = "ulimit -c unlimited ; "
+
+#security params
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+falcon_user = config['configurations']['falcon-env']['falcon_user']
+
+#exclude file
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+
+klist_path_local = functions.get_klist_path()
+kinit_path_local = functions.get_kinit_path()
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+nm_host = default("/clusterHostInfo/nm_host", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_namenodes = not len(namenode_host) == 0
+has_jobtracker = not len(jtnode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_histroryserver = not len(hs_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_journalnode_hosts = not len(journalnode_hosts)  == 0
+has_zkfc_hosts = not len(zkfc_hosts)  == 0
+has_falcon_host = not len(falcon_host)  == 0
+
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+#users and groups
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+webhcat_user = config['configurations']['hive-env']['hcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+hive_user = config['configurations']['hive-env']['hive_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+
+user_group = config['configurations']['cluster-env']['user_group']
+root_group = "root"
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
+dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
+
+jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
+
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
+
+namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
+namenode_dirs_stub_filename = "namenode_dirs_created"
+
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+
+hdfs_namenode_formatted_mark_suffix = "/namenode-formatted/"
+namenode_formatted_old_mark_dirs = ["/var/run/hadoop/hdfs/namenode-formatted",
+  format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted"),
+  "/var/lib/hdfs/namenode/formatted"]
+dfs_name_dirs = dfs_name_dir.split(",")
+namenode_formatted_mark_dirs = []
+for dn_dir in dfs_name_dirs:
+  tmp_mark_dir = format("{dn_dir}{hdfs_namenode_formatted_mark_suffix}")
+  namenode_formatted_mark_dirs.append(tmp_mark_dir)
+
+# Use the namenode RPC address if configured, otherwise, fallback to the default file system
+namenode_address = None
+if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
+  namenode_rpcaddress = config['configurations']['hdfs-site']['dfs.namenode.rpc-address']
+  namenode_address = format("hdfs://{namenode_rpcaddress}")
+else:
+  namenode_address = config['configurations']['core-site']['fs.defaultFS']
+
+fs_checkpoint_dirs = default("/configurations/hdfs-site/dfs.namenode.checkpoint.dir", "").split(',')
+
+dfs_data_dirs = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
+
+data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
+
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
+
+# hostname of the active HDFS HA Namenode (only used when HA is enabled)
+dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
+# hostname of the standby HDFS HA Namenode (only used when HA is enabled)
+dfs_ha_namenode_standby = default("/configurations/hadoop-env/dfs_ha_initial_namenode_standby", None)
+
+namenode_id = None
+namenode_rpc = None
+
+dfs_ha_namemodes_ids_list = []
+other_namenode_id = None
+
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namemodes_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+      namenode_rpc = nn_host
+
+if dfs_http_policy is not None and dfs_http_policy.upper() == "HTTPS_ONLY":
+  https_only = True
+  journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.https-address', None)
+else:
+  https_only = False
+  journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
+
+if journalnode_address:
+  journalnode_port = journalnode_address.split(":")[1]
+
+
+if security_enabled:
+  _dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+  _dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+  _dn_principal_name = _dn_principal_name.replace('_HOST',hostname.lower())
+
+  dn_kinit_cmd = format("{kinit_path_local} -kt {_dn_keytab} {_dn_principal_name};")
+
+  _nn_principal_name = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+  _nn_keytab = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+  _nn_principal_name = _nn_principal_name.replace('_HOST',hostname.lower())
+
+  nn_kinit_cmd = format("{kinit_path_local} -kt {_nn_keytab} {_nn_principal_name};")
+
+  _jn_principal_name = default("/configurations/hdfs-site/dfs.journalnode.kerberos.principal", None)
+  if _jn_principal_name:
+    _jn_principal_name = _jn_principal_name.replace('_HOST', hostname.lower())
+  _jn_keytab = default("/configurations/hdfs-site/dfs.journalnode.keytab.file", None)
+  jn_kinit_cmd = format("{kinit_path_local} -kt {_jn_keytab} {_jn_principal_name};")
+else:
+  dn_kinit_cmd = ""
+  nn_kinit_cmd = ""
+  jn_kinit_cmd = ""
+
+
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+# Create a partial function that pre-binds the arguments shared by every
+# HdfsResource call; to manage an HDFS path, code calls params.HdfsResource.
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
+
+io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
+if not "com.hadoop.compression.lzo" in io_compression_codecs:
+  exclude_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native", "liblzo2-2"]
+else:
+  exclude_packages = []
+name_node_params = default("/commandParams/namenode", None)
+
+#hadoop params
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+java_version = int(config['hostLevelParams']['java_version'])
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
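Editor's note: the params.py added above closes by pre-binding, via functools.partial, the arguments that every HdfsResource call shares. A minimal, self-contained sketch of that idiom follows; the stand-in hdfs_resource function, its signature, and the keytab path are illustrative only and are not the real Ambari resource API.

import functools

def hdfs_resource(path, user=None, security_enabled=False, keytab=None, action="create"):
    # Stand-in for the real resource: just echoes what it would be asked to do.
    print("HdfsResource(%s) as user=%s action=%s secure=%s" % (path, user, action, security_enabled))

HdfsResource = functools.partial(
    hdfs_resource,
    user="hdfs",
    security_enabled=True,
    keytab="/etc/security/keytabs/hdfs.headless.keytab")

HdfsResource("/tmp")                              # user/keytab/security come from the partial
HdfsResource("/user/ambari-qa", action="create")  # call sites pass only what varies

Pre-binding the common keyword arguments once keeps the rest of the package scripts free of repeated security plumbing.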

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/service_check.py
new file mode 100755
index 0000000..ffbe658
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/service_check.py
@@ -0,0 +1,119 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class HdfsServiceCheck(Script):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    unique = functions.get_unique_id_and_date()
+    dir = '/tmp'
+    tmp_file = format("{dir}/{unique}")
+
+    safemode_command = "dfsadmin -safemode get | grep OFF"
+
+    create_dir_cmd = format("fs -mkdir {dir}")
+    chmod_command = format("fs -chmod 777 {dir}")
+    test_dir_exists = as_user(format("{hadoop_bin_dir}/hadoop --config {hadoop_conf_dir} fs -test -e {dir}"), params.hdfs_user)
+    cleanup_cmd = format("fs -rm {tmp_file}")
+    #cleanup put below to handle retries; if retrying there will be a stale file
+    #that needs cleanup; exit code is a function of the second command
+    create_file_cmd = format(
+      "{cleanup_cmd}; hadoop --config {hadoop_conf_dir} fs -put /etc/passwd {tmp_file}")
+    test_cmd = format("fs -test -e {tmp_file}")
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+        user=params.hdfs_user
+      )
+    ExecuteHadoop(safemode_command,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=20,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    ExecuteHadoop(create_dir_cmd,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  not_if=test_dir_exists,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    ExecuteHadoop(chmod_command,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    ExecuteHadoop(create_file_cmd,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    ExecuteHadoop(test_cmd,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    if params.has_journalnode_hosts:
+      journalnode_port = params.journalnode_port
+      checkWebUIFileName = "checkWebUI.py"
+      checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
+      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
+      checkWebUICmd = format("python {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port} -s {https_only}")
+      File(checkWebUIFilePath,
+           content=StaticFile(checkWebUIFileName),
+           mode=0775)
+
+      Execute(checkWebUICmd,
+              logoutput=True,
+              try_sleep=3,
+              tries=5,
+              user=params.smoke_user
+      )
+
+    if params.is_namenode_master:
+      if params.has_zkfc_hosts:
+        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+        check_zkfc_process_cmd = format(
+          "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
+        Execute(check_zkfc_process_cmd,
+                logoutput=True,
+                try_sleep=3,
+                tries=5
+        )
+
+
+if __name__ == "__main__":
+  HdfsServiceCheck().execute()
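Editor's note: the service check above relies on the tries/try_sleep arguments of ExecuteHadoop to tolerate a NameNode that is still leaving safemode. A rough sketch of that retry behaviour, assuming a plain shell command instead of the resource_management wrappers:

import subprocess
import time

def run_with_retries(cmd, tries=5, try_sleep=3):
    # Run a shell command up to `tries` times, sleeping `try_sleep` seconds
    # between attempts; fail only after the final attempt.
    for attempt in range(1, tries + 1):
        if subprocess.call(cmd, shell=True) == 0:
            return
        if attempt < tries:
            time.sleep(try_sleep)
    raise RuntimeError("Command failed after %d tries: %s" % (tries, cmd))

# Example (requires an hdfs client on the PATH): wait until safemode is OFF.
# run_with_retries("hdfs dfsadmin -safemode get | grep OFF", tries=20, try_sleep=3)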

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/snamenode.py
new file mode 100755
index 0000000..4224a9e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/snamenode.py
@@ -0,0 +1,142 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from resource_management.core.logger import Logger
+
+from hdfs_snamenode import snamenode
+from hdfs import hdfs
+
+class SNameNode(Script):
+
+  def get_component_name(self):
+    return "hadoop-hdfs-secondarynamenode"
+
+  def install(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.install_packages(env)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-hdfs-secondarynamenode", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    self.configure(env)
+    snamenode(action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    snamenode(action="stop")
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    hdfs()
+    snamenode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.snamenode_pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+    props_value_check = None
+    props_empty_check = ['dfs.secondary.namenode.kerberos.internal.spnego.principal',
+                         'dfs.secondary.namenode.keytab.file',
+                         'dfs.secondary.namenode.kerberos.principal']
+    props_read_check = ['dfs.secondary.namenode.keytab.file']
+    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
+                                                props_read_check)
+
+    hdfs_expectations = {}
+    hdfs_expectations.update(core_site_expectations)
+    hdfs_expectations.update(hdfs_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                 {'core-site.xml': FILE_TYPE_XML,
+                                                  'hdfs-site.xml': FILE_TYPE_XML})
+
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ('hdfs-site' not in security_params or
+                  'dfs.secondary.namenode.keytab.file' not in security_params['hdfs-site'] or
+                  'dfs.secondary.namenode.kerberos.principal' not in security_params['hdfs-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hdfs_user,
+                                security_params['hdfs-site']['dfs.secondary.namenode.keytab.file'],
+                                security_params['hdfs-site'][
+                                  'dfs.secondary.namenode.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+if __name__ == "__main__":
+  SNameNode().execute()
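Editor's note: security_status() above builds "expectations" over core-site and hdfs-site and then validates them. The sketch below reproduces the essence of that validation with plain dictionaries; validate_expectations is a hypothetical stand-in, not the Ambari build_expectations/validate_security_config_properties helpers.

def validate_expectations(actual, value_check=None, empty_check=None):
    # Value-checked properties must match exactly; empty-checked properties
    # must be present and non-empty. Returns a list of readable issues.
    issues = []
    for prop, expected in (value_check or {}).items():
        if actual.get(prop) != expected:
            issues.append("%s should be %s, found %s" % (prop, expected, actual.get(prop)))
    for prop in (empty_check or []):
        if not actual.get(prop):
            issues.append("%s is missing or empty" % prop)
    return issues

core_site = {"hadoop.security.authentication": "kerberos",
             "hadoop.security.authorization": "true",
             "hadoop.security.auth_to_local": "DEFAULT"}
print(validate_expectations(core_site,
                            value_check={"hadoop.security.authentication": "kerberos",
                                         "hadoop.security.authorization": "true"},
                            empty_check=["hadoop.security.auth_to_local"]))  # -> []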

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/status_params.py
new file mode 100755
index 0000000..f1abf08
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/status_params.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+
+config = Script.get_config()
+
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hadoop_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+datanode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+namenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+snamenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+journalnode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+zkfc_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+nfsgateway_pid_file = format("{hadoop_pid_dir_prefix}/root/hadoop_privileged_nfs3.pid")
+
+# Security related/required params
+hostname = config['hostname']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user_principal = config['configurations']['hadoop-env']['hdfs_principal_name']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+kinit_path_local = functions.get_kinit_path()
+tmp_dir = Script.get_tmp_dir()
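Editor's note: status_params.py reduces component status to pid files named hadoop-<user>-<component>.pid. A minimal sketch of the kind of liveness check check_process_status performs, assuming the conventional pid file location shown above:

import os

def is_process_alive(pid_file):
    # Read the pid file and send signal 0: this does not kill the process,
    # it only checks that the pid exists and is visible to this user.
    if not os.path.isfile(pid_file):
        return False
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)
        return True
    except (ValueError, OSError):
        return False

print(is_process_alive("/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid"))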

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/utils.py
new file mode 100755
index 0000000..5cd1735
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/utils.py
@@ -0,0 +1,357 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import re
+import urllib2
+import json
+
+from resource_management import *
+from resource_management.libraries.functions.format import format
+from resource_management.core.shell import call, checked_call
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+
+from zkfc_slave import ZkfcSlave
+
+def safe_zkfc_op(action, env):
+  """
+  Idempotent operation on the zkfc process to either start or stop it.
+  :param action: start or stop
+  :param env: environment
+  """
+  zkfc = None
+  if action == "start":
+    try:
+      zkfc = ZkfcSlave()
+      zkfc.status(env)
+    except ComponentIsNotRunning:
+      if zkfc:
+        zkfc.start(env)
+
+  if action == "stop":
+    try:
+      zkfc = ZkfcSlave()
+      zkfc.status(env)
+    except ComponentIsNotRunning:
+      pass
+    else:
+      if zkfc:
+        zkfc.stop(env)
+
+
+def failover_namenode():
+  """
+  Failover the primary namenode by killing zkfc if it exists on this host (assuming this host is the primary).
+  """
+  import params
+  check_service_cmd = format("hdfs haadmin -getServiceState {namenode_id}")
+  code, out = call(check_service_cmd, logoutput=True, user=params.hdfs_user)
+
+  state = "unknown"
+  if code == 0 and out:
+    state = "active" if "active" in out else ("standby" if "standby" in out else state)
+    Logger.info("Namenode service state: %s" % state)
+
+  if state == "active":
+    Logger.info("Rolling Upgrade - Initiating namenode failover by killing zkfc on active namenode")
+
+    # Forcefully kill ZKFC on this host to initiate a failover
+    # If ZKFC is already dead, then potentially this node can still be the active one.
+    was_zkfc_killed = kill_zkfc(params.hdfs_user)
+
+    # Wait until it transitions to standby
+    check_standby_cmd = format("hdfs haadmin -getServiceState {namenode_id} | grep standby")
+
+    # process may already be down.  try one time, then proceed
+    code, out = call(check_standby_cmd, user=params.hdfs_user, logoutput=True)
+    Logger.info(format("Rolling Upgrade - check for standby returned {code}"))
+
+    if code == 255 and out:
+      Logger.info("Rolling Upgrade - namenode is already down")
+    else:
+      if was_zkfc_killed:
+        # Only mandate that this be the standby namenode if ZKFC was indeed killed to initiate a failover.
+        Execute(check_standby_cmd,
+                user=params.hdfs_user,
+                tries=50,
+                try_sleep=6,
+                logoutput=True)
+
+  else:
+    Logger.info("Rolling Upgrade - Host %s is the standby namenode." % str(params.hostname))
+
+
+def kill_zkfc(zkfc_user):
+  """
+  There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
+  Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it.
+  Option 2. Silent failover (not supported as of IOP 4.0.0.0)
+  :param zkfc_user: User that started the ZKFC process.
+  :return: Return True if ZKFC was killed, otherwise, false.
+  """
+  import params
+  if params.dfs_ha_enabled:
+    zkfc_pid_file = get_service_pid_file("zkfc", zkfc_user)
+    if zkfc_pid_file:
+      check_process = format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1")
+      code, out = call(check_process)
+      if code == 0:
+        Logger.debug("ZKFC is running and will be killed to initiate namenode failover.")
+        kill_command = format("{check_process} && kill -9 `cat {zkfc_pid_file}` > /dev/null 2>&1")
+        Execute(kill_command)
+        Execute(format("rm -f {zkfc_pid_file}"))
+        return True
+  return False
+
+
+def get_service_pid_file(name, user):
+  """
+  Get the pid file path that was used to start the service by the user.
+  :param name: Service name
+  :param user: User that started the service.
+  :return: PID file path
+  """
+  import params
+  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
+  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
+  return pid_file
+
+
+def service(action=None, name=None, user=None, options="", create_pid_dir=False,
+            create_log_dir=False):
+  """
+  :param action: Either "start" or "stop"
+  :param name: Component name, e.g., "namenode", "datanode", "secondarynamenode", "zkfc"
+  :param user: User to run the command as
+  :param options: Additional options to pass to command as a string
+  :param create_pid_dir: Create PID directory
+  :param create_log_dir: Create log file directory
+  """
+  import params
+
+  options = options if options else ""
+  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
+  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
+  hadoop_env_exports = {
+    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
+  }
+  log_dir = format("{hdfs_log_dir_prefix}/{user}")
+
+  # NFS GATEWAY is always started by root using jsvc due to rpcbind bugs
+  # on Linux such as CentOS6.2. https://bugzilla.redhat.com/show_bug.cgi?id=731542
+  if name == "nfs3" :
+    pid_file = format("{pid_dir}/hadoop_privileged_nfs3.pid")
+    custom_export = {
+      'HADOOP_PRIVILEGED_NFS_USER': params.hdfs_user,
+      'HADOOP_PRIVILEGED_NFS_PID_DIR': pid_dir,
+      'HADOOP_PRIVILEGED_NFS_LOG_DIR': log_dir
+    }
+    hadoop_env_exports.update(custom_export)
+
+  check_process = format(
+    "ls {pid_file} >/dev/null 2>&1 &&"
+    " ps -p `cat {pid_file}` >/dev/null 2>&1")
+
+  # on STOP directories shouldn't be created
+  # since during stop still old dirs are used (which were created during previous start)
+  if action != "stop":
+    if name == "nfs3":
+      Directory(params.hadoop_pid_dir_prefix,
+                mode=0755,
+                owner=params.root_user,
+                group=params.root_group
+      )
+    else:
+      Directory(params.hadoop_pid_dir_prefix,
+                  mode=0755,
+                  owner=params.hdfs_user,
+                  group=params.user_group
+      )
+    if create_pid_dir:
+      Directory(pid_dir,
+                owner=user,
+                create_parents=True)
+    if create_log_dir:
+      if name == "nfs3":
+        Directory(log_dir,
+                  mode=0775,
+                  owner=params.root_user,
+                  group=params.user_group)
+      else:
+        Directory(log_dir,
+                  owner=user,
+                  create_parents=True)
+
+  if params.security_enabled and name == "datanode":
+    ## The directory where pid files are stored in the secure data environment.
+    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
+
+    if params.secure_dn_ports_are_in_use:
+      user = "root"
+      pid_file = format(
+        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
+
+    if action == 'stop' and os.path.isfile(hadoop_secure_dn_pid_file):
+        # We need special handling for this case to handle the situation
+        # when we configure non-root secure DN and then restart it
+        # to handle new configs. Otherwise we will not be able to stop
+        # a running instance
+        user = "root"
+
+        try:
+          check_process_status(hadoop_secure_dn_pid_file)
+
+          custom_export = {
+            'HADOOP_SECURE_DN_USER': params.hdfs_user
+          }
+          hadoop_env_exports.update(custom_export)
+
+        except ComponentIsNotRunning:
+          pass
+
+  hadoop_daemon = format("{hadoop_bin}/hadoop-daemon.sh")
+
+  if user == "root":
+    cmd = [hadoop_daemon, "--config", params.hadoop_conf_dir, action, name]
+    if options:
+      cmd += [options, ]
+    daemon_cmd = as_sudo(cmd)
+  else:
+    cmd = format("{ulimit_cmd} {hadoop_daemon} --config {hadoop_conf_dir} {action} {name}")
+    if options:
+      cmd += " " + options
+    daemon_cmd = as_user(cmd, user)
+
+  service_is_up = check_process if action == "start" else None
+  #remove pid file from dead process
+  File(pid_file,
+       action="delete",
+       not_if=check_process
+  )
+  Execute(daemon_cmd,
+          not_if=service_is_up,
+          environment=hadoop_env_exports
+  )
+
+  if action == "stop":
+    File(pid_file,
+         action="delete",
+    )
+
+
+def get_value_from_jmx(qry, property):
+  try:
+    response = urllib2.urlopen(qry)
+    data = response.read()
+    if data:
+      data_dict = json.loads(data)
+      return data_dict["beans"][0][property]
+  except Exception:
+    return None
+
+def get_jmx_data(nn_address, modeler_type, metric, encrypted=False, security_enabled=False):
+  """
+  :param nn_address: Namenode Address, e.g., host:port, ** MAY ** be preceded with "http://" or "https://" already.
+  If not preceded, will use the encrypted param to determine.
+  :param modeler_type: Modeler type to query using startswith function
+  :param metric: Metric to return
+  :return: Return an object representation of the metric, or None if it does not exist
+  """
+  if not nn_address or not modeler_type or not metric:
+    return None
+
+  nn_address = nn_address.strip()
+  if not nn_address.startswith("http"):
+    nn_address = ("https://" if encrypted else "http://") + nn_address
+  if not nn_address.endswith("/"):
+    nn_address = nn_address + "/"
+
+  nn_address = nn_address + "jmx"
+  Logger.info("Retrieve modeler: %s, metric: %s from JMX endpoint %s" % (modeler_type, metric, nn_address))
+
+  if security_enabled:
+    import params
+    data, error_msg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab, params.smokeuser_principal, nn_address,
+                            "jn_upgrade", params.kinit_path_local, False, None, params.smoke_user)
+  else:
+    data = urllib2.urlopen(nn_address).read()
+  my_data = None
+  if data:
+    data_dict = json.loads(data)
+    if data_dict:
+      for el in data_dict['beans']:
+        if el is not None and el['modelerType'] is not None and el['modelerType'].startswith(modeler_type):
+          if metric in el:
+            my_data = el[metric]
+            if my_data:
+              my_data = json.loads(str(my_data))
+              break
+  return my_data
+
+def get_port(address):
+  """
+  Extracts port from the address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None and len(m.groups()) >= 2:
+    return int(m.group(2))
+  else:
+    return None
+
+
+def is_secure_port(port):
+  """
+  Returns True if port is root-owned at *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False
+
+def get_dfsadmin_base_command(hdfs_binary, use_specific_namenode = False):
+  """
+  Get the dfsadmin base command constructed using hdfs_binary path and passing namenode address as explicit -fs argument
+  :param hdfs_binary: path to hdfs binary to use
+  :param use_specific_namenode: flag if set and Namenode HA is enabled, then the dfsadmin command will use
+  current namenode's address
+  :return: the constructed dfsadmin base command
+  """
+  import params
+  dfsadmin_base_command = ""
+  if params.dfs_ha_enabled and use_specific_namenode:
+    dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs hdfs://{params.namenode_rpc}")
+  else:
+    dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs {params.namenode_address}")
+  return dfsadmin_base_command
+
+def is_previous_fs_image():
+  """
+  Return true if there's a previous folder in the HDFS namenode directories.
+  """
+  import params
+  if params.dfs_name_dir:
+    nn_name_dirs = params.dfs_name_dir.split(',')
+    for nn_dir in nn_name_dirs:
+      prev_dir = os.path.join(nn_dir, "previous")
+      if os.path.isdir(prev_dir):
+        return True
+  return False
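Editor's note: get_jmx_data() in utils.py drives rolling-upgrade decisions off the NameNode/JournalNode /jmx endpoint. The sketch below shows the same lookup for an unsecured cluster using urllib2, as the script itself does; the host and bean names are examples only.

import json
import urllib2

def jmx_attribute(host_port, modeler_prefix, attribute):
    # Fetch http://<host:port>/jmx and return one attribute from the first
    # bean whose modelerType starts with the given prefix, or None.
    data = urllib2.urlopen("http://%s/jmx" % host_port).read()
    for bean in json.loads(data).get("beans", []):
        if str(bean.get("modelerType", "")).startswith(modeler_prefix):
            return bean.get(attribute)
    return None

# Example (hypothetical host, unsecured cluster):
# print(jmx_attribute("jn-host.example.com:8480", "JournalNode", "modelerType"))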


[39/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml
new file mode 100755
index 0000000..fc510fa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,606 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+
+  <!-- file system properties -->
+
+  <property>
+    <name>dfs.namenode.name.dir</name>
+    <!-- cluster variant -->
+    <value>/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <display-name>NameNode directories</display-name>
+    <final>true</final>
+    <value-attributes>
+      <type>directories</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>true</value>
+    <display-name>WebHDFS enabled</display-name>
+    <description>Whether to enable WebHDFS feature</description>
+    <final>true</final>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
+    <final>true</final>
+    <display-name>DataNode failed disk tolerance</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hdfs-site</type>
+        <name>dfs.datanode.data.dir</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <display-name>DataNode directories</display-name>
+    <description>Determines where on the local filesystem an DFS data node
+      should store its blocks.  If this is a comma-delimited
+      list of directories, then data will be stored in all named
+      directories, typically on different devices.
+      Directories that do not exist are ignored.
+    </description>
+    <final>true</final>
+    <value-attributes>
+      <type>directories</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+      not permitted to connect to the namenode.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.</description>
+  </property>
+
+  <!--
+    <property>
+      <name>dfs.hosts</name>
+      <value>/etc/hadoop/conf/dfs.include</value>
+      <description>Names a file that contains a list of hosts that are
+      permitted to connect to the namenode. The full pathname of the file
+      must be specified.  If the value is empty, all hosts are
+      permitted.</description>
+    </property>
+  -->
+
+  <property>
+    <name>dfs.namenode.checkpoint.dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <display-name>SecondaryNameNode Checkpoint directories</display-name>
+    <description>Determines where on the local filesystem the DFS secondary
+      name node should store the temporary images to merge.
+      If this is a comma-delimited list of directories then the image is
+      replicated in all of the directories for redundancy.
+    </description>
+    <value-attributes>
+      <type>directories</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.namenode.checkpoint.edits.dir</name>
+    <value>${dfs.namenode.checkpoint.dir}</value>
+    <description>Determines where on the local filesystem the DFS secondary
+      name node should store the temporary edits to merge.
+      If this is a comma-delimited list of directories then the edits are
+      replicated in all of the directories for redundancy.
+      Default value is the same as dfs.namenode.checkpoint.dir.
+    </description>
+  </property>
+
+
+  <property>
+    <name>dfs.namenode.checkpoint.period</name>
+    <value>21600</value>
+    <display-name>HDFS Maximum Checkpoint Delay</display-name>
+    <description>The number of seconds between two periodic checkpoints.</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>seconds</unit>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.namenode.checkpoint.txns</name>
+    <value>1000000</value>
+    <description>The Secondary NameNode or CheckpointNode will create a checkpoint
+      of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
+      regardless of whether 'dfs.namenode.checkpoint.period' has expired.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.</description>
+    <display-name>Block replication</display-name>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.safemode.threshold-pct</name>
+    <value>0.999</value>
+    <description>
+      Specifies the percentage of blocks that should satisfy
+      the minimal replication requirement defined by dfs.namenode.replication.min.
+      Values less than or equal to 0 mean not to start in safe mode.
+      Values greater than 1 will make safe mode permanent.
+    </description>
+    <display-name>Minimum replicated blocks %</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0.990</minimum>
+      <maximum>1.000</maximum>
+      <increment-step>0.001</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.datanode.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+      Specifies the maximum amount of bandwidth that each datanode
+      can utilize for the balancing purpose in term of
+      the number of bytes per second.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>
+      This property is used by HftpFileSystem.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+    <description>
+      The datanode server address and port for data transfer.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+    <description>
+      The datanode http server address and port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.https.address</name>
+    <value>0.0.0.0:50475</value>
+    <description>
+      The datanode https server address and port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blocksize</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.http-address</name>
+    <value>localhost:50070</value>
+    <description>The address and the base port where the DFS NameNode
+      web UI will listen on.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <display-name>Reserved space for HDFS</display-name>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>bytes</unit>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.transfer.threads</name>
+    <value>8192</value>
+    <description>Specifies the maximum number of threads to use for transferring data in and out of the datanode.</description>
+    <display-name>DataNode max data transfer threads</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>48000</maximum>
+    </value-attributes>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>022</value>
+    <description>
+      The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.enabled</name>
+    <value>true</value>
+    <description>
+      If "true", enable permission checking in HDFS.
+      If "false", permission checking is turned off,
+      but all other behavior is unchanged.
+      Switching from one parameter value to the other does not change the mode,
+      owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.superusergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>64</value>
+    <description>Added to grow Queue size so that more client connections are allowed</description>
+    <display-name>NameNode Server threads</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1</minimum>
+      <maximum>200</maximum>
+    </value-attributes>
+  </property>
+
+   <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+      If "true", access tokens are used as capabilities for accessing datanodes.
+      If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value>nn/_HOST@EXAMPLE.COM</value>
+    <description>
+      Kerberos principal name for the NameNode
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value>nn/_HOST@EXAMPLE.COM</value>
+    <description>
+      Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+  <!--
+    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+  -->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.namenode.secondary.http-address</name>
+    <value>localhost:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/etc/security/keytabs/spnego.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@EXAMPLE.COM</value>
+    <description>
+      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/etc/security/keytabs/nn.service.keytab</value>
+    <description>
+      Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/etc/security/keytabs/nn.service.keytab</value>
+    <description>
+      Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/etc/security/keytabs/dn.service.keytab</value>
+    <description>
+      The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.https-address</name>
+    <value>localhost:50470</value>
+    <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+    <display-name>DataNode directories permission</display-name>
+    <description>The permissions that should be there on dfs.datanode.data.dir
+      directories. The datanode will not come up if the permissions are
+      different on existing dfs.datanode.data.dir directories. If the directories
+      don't exist, they will be created with this permission.</description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.namenode.accesstime.precision</name>
+    <value>3600000</value>
+    <display-name>Access time precision</display-name>
+    <description>The access time for HDFS files is precise up to this value.
+      The default value is 1 hour. Setting a value of 0 disables
+      access times for HDFS.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL for who all can view the default servlets in the HDFS</description>
+    <value-attributes>
+      <visible>true</visible>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.namenode.avoid.read.stale.datanode</name>
+    <value>true</value>
+    <description>
+      Indicate whether or not to avoid reading from stale datanodes whose
+      heartbeat messages have not been received by the namenode for more than a
+      specified time interval.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.avoid.write.stale.datanode</name>
+    <value>true</value>
+    <description>
+      Indicate whether or not to avoid writing to stale datanodes whose
+      heartbeat messages have not been received by the namenode for more than a
+      specified time interval.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.write.stale.datanode.ratio</name>
+    <value>1.0f</value>
+    <description>When the ratio of the number of stale datanodes to the total number of datanodes is greater
+      than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.stale.datanode.interval</name>
+    <value>30000</value>
+    <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.http-address</name>
+    <value>0.0.0.0:8480</value>
+    <description>The address and port the JournalNode web UI listens on.
+      If the port is 0 then the server will start on a free port. </description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.edits.dir</name>
+    <value>/grid/0/hdfs/journal</value>
+    <description>The path where the JournalNode daemon will store its local state. </description>
+  </property>
+
+  <!-- HDFS Short-Circuit Local Reads -->
+
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>This configuration parameter turns on short-circuit local reads.</description>
+    <display-name>HDFS Short-circuit read</display-name>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>
+      This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
+    <value>4096</value>
+    <description>
+      The DFSClient maintains a cache of recently opened file descriptors. This
+      parameter controls the size of that cache. Setting this higher will use
+      more file descriptors, but potentially provide better performance on
+      workloads involving lots of seeks.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.name.dir.restore</name>
+    <value>true</value>
+    <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
+      When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.handler.count</name>
+    <value>40</value>
+  </property>
+
+  <property>
+    <name>dfs.namenode.acls.enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>dfs.client.file-block-storage-locations.timeout.millis</name>
+    <value>3000</value>
+  </property>
+
+  <property>
+    <name>dfs.client.mmap.enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.locked.memory</name>
+    <value>0</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
+    <value>true</value>
+  </property>
+  <property>
+     <name>dfs.http.policy</name>
+     <value>HTTP_ONLY</value>
+  </property>
+</configuration>
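Editor's note: the properties declared in hdfs-site.xml above are what the package scripts later read back as config['configurations']['hdfs-site']. A small sketch of loading such a *-site.xml into a dict, assuming the conventional /etc/hadoop/conf location:

import xml.etree.ElementTree as ET

def load_hadoop_site(path):
    # Collect <property><name>/<value> pairs from a Hadoop *-site.xml file.
    props = {}
    for prop in ET.parse(path).getroot().findall("property"):
        name = prop.findtext("name")
        if name:
            props[name] = prop.findtext("value", default="")
    return props

# hdfs_site = load_hadoop_site("/etc/hadoop/conf/hdfs-site.xml")
# print(hdfs_site.get("dfs.namenode.http-address"))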

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml
new file mode 100755
index 0000000..4513fdd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>ssl.client.truststore.location</name>
+        <value>/etc/security/clientKeys/all.jks</value>
+        <description>Location of the trust store file.</description>
+    </property>
+    <property>
+        <name>ssl.client.truststore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.client.truststore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the trust store file.</description>
+    </property>
+    <property>
+        <name>ssl.client.truststore.reload.interval</name>
+        <value>10000</value>
+        <description>Truststore reload interval, in milliseconds.</description>
+    </property>
+    <property>
+        <name>ssl.client.keystore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.client.keystore.location</name>
+        <value>/etc/security/clientKeys/keystore.jks</value>
+        <description>Location of the keystore file.</description>
+    </property>
+    <property>
+        <name>ssl.client.keystore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the keystore file.</description>
+    </property>
+</configuration>
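
Note: the truststore/keystore locations and the "bigdata" passwords above are stack
defaults. A small sketch (illustration only; paths copied from the defaults above)
to verify the client stores exist before switching dfs.http.policy to HTTPS_ONLY:

  import os

  # Paths taken from the ssl-client defaults above; adjust per cluster.
  STORES = {
      "ssl.client.truststore.location": "/etc/security/clientKeys/all.jks",
      "ssl.client.keystore.location": "/etc/security/clientKeys/keystore.jks",
  }

  for name, path in sorted(STORES.items()):
      state = "OK" if os.path.isfile(path) and os.access(path, os.R_OK) else "MISSING"
      print("%-35s %s (%s)" % (name, path, state))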

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml
new file mode 100755
index 0000000..f95793e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>ssl.server.truststore.location</name>
+        <value>/etc/security/serverKeys/all.jks</value>
+        <description>Location of the trust store file.</description>
+    </property>
+    <property>
+        <name>ssl.server.truststore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.server.truststore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the trust store file.</description>
+    </property>
+    <property>
+        <name>ssl.server.truststore.reload.interval</name>
+        <value>10000</value>
+        <description>Truststore reload interval, in milliseconds.</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.type</name>
+        <value>jks</value>
+        <description>Optional. Default value is "jks".</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.location</name>
+        <value>/etc/security/serverKeys/keystore.jks</value>
+        <description>Location of the keystore file.</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.password</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password to open the keystore file.</description>
+    </property>
+    <property>
+        <name>ssl.server.keystore.keypassword</name>
+        <value>bigdata</value>
+        <property-type>PASSWORD</property-type>
+        <description>Password for private key in keystore file.</description>
+    </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/kerberos.json
new file mode 100755
index 0000000..2d1674b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/kerberos.json
@@ -0,0 +1,242 @@
+{
+  "services": [
+    {
+      "name": "HDFS",
+      "identities": [
+        {
+          "name": "/spnego",
+          "principal": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal"
+          },
+          "keytab": {
+            "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "/hdfs"
+        }
+      ],
+      "auth_to_local_properties" : [
+        "core-site/hadoop.security.auth_to_local"
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "hadoop.security.authentication": "kerberos",
+            "hadoop.rpc.protection": "authentication",
+            "hadoop.security.authorization": "true",
+            "hadoop.security.auth_to_local": "",
+            "hadoop.http.authentication.kerberos.name.rules": "",
+            "hadoop.http.filter.initializers": "",
+            "hadoop.http.authentication.type": "simple",
+            "hadoop.http.authentication.signature.secret": "",
+            "hadoop.http.authentication.signature.secret.file": "",
+            "hadoop.http.authentication.signer.secret.provider": "",
+            "hadoop.http.authentication.signer.secret.provider.object": "",
+            "hadoop.http.authentication.token.validity": "",
+            "hadoop.http.authentication.cookie.domain": "",
+            "hadoop.http.authentication.cookie.path": "",
+            "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name":  "HDFS_CLIENT",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        },
+        {
+          "name": "NAMENODE",
+          "identities": [
+            {
+              "name": "hdfs",
+              "principal": {
+                "value": "${hadoop-env/hdfs_user}-${cluster_name}@${realm}",
+                "type" : "user" ,
+                "configuration": "hadoop-env/hdfs_principal_name",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hdfs.headless.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hadoop-env/hdfs_user_keytab"
+              }
+            },
+            {
+              "name": "namenode_nn",
+              "principal": {
+                "value": "nn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.namenode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "hdfs-site": {
+                "dfs.block.access.token.enable": "true"
+              }
+            }
+          ]
+        },
+        {
+          "name": "DATANODE",
+          "identities": [
+            {
+              "name": "datanode_dn",
+              "principal": {
+                "value": "dn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/dn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.datanode.keytab.file"
+              }
+            }
+          ],
+          "configurations" : [
+            {
+              "hdfs-site" : {
+                "dfs.datanode.address" : "0.0.0.0:1019",
+                "dfs.datanode.http.address": "0.0.0.0:1022"
+              }
+            }
+          ]
+        },
+        {
+          "name": "SECONDARY_NAMENODE",
+          "identities": [
+            {
+              "name": "secondary_namenode_nn",
+              "principal": {
+                "value": "nn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal"
+              }
+            }
+          ]
+        },
+        {
+          "name": "NFS_GATEWAY",
+          "identities": [
+            {
+              "name": "nfsgateway",
+              "principal": {
+                "value": "nfs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/nfs.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nfs.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/nfs.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "JOURNALNODE",
+          "identities": [
+            {
+              "name": "journalnode_jn",
+              "principal": {
+                "value": "jn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
+                "local_username" : "${hadoop-env/hdfs_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jn.service.keytab",
+                "owner": {
+                  "name": "${hadoop-env/hdfs_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hdfs-site/dfs.journalnode.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
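
Note: the descriptor above uses ${config-type/property} and ${variable} style
references (e.g. ${hadoop-env/hdfs_user}, ${realm}). A rough sketch of how such
references resolve, not Ambari's actual resolver, assuming a plain dict of
configurations:

  import re

  def resolve(template, configurations, extra=None):
      extra = extra or {}
      def repl(match):
          token = match.group(1)
          if "/" in token:
              config_type, prop = token.split("/", 1)
              return str(configurations.get(config_type, {}).get(prop, match.group(0)))
          return str(extra.get(token, match.group(0)))
      return re.sub(r"\$\{([^}]+)\}", repl, template)

  configs = {"hadoop-env": {"hdfs_user": "hdfs"}}
  print(resolve("${hadoop-env/hdfs_user}-${cluster_name}@${realm}", configs,
                {"cluster_name": "bi42", "realm": "EXAMPLE.COM"}))
  # -> hdfs-bi42@EXAMPLE.COM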

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metainfo.xml
new file mode 100755
index 0000000..918cdb3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metainfo.xml
@@ -0,0 +1,234 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <displayName>HDFS</displayName>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.7.1</version>
+      <components>
+        <component>
+          <name>NAMENODE</name>
+          <displayName>NameNode</displayName>
+          <category>MASTER</category>
+          <cardinality>1-2</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>REBALANCEHDFS</name>
+              <background>true</background>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>DATANODE</name>
+          <displayName>DataNode</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>SECONDARY_NAMENODE</name>
+          <displayName>SNameNode</displayName>
+          <!-- TODO:  cardinality is conditional on HA usage -->
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HDFS_CLIENT</name>
+          <displayName>HDFS Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hdfs-site.xml</fileName>
+              <dictionaryName>hdfs-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-env.sh</fileName>
+              <dictionaryName>hadoop-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+
+        <component>
+          <name>JOURNALNODE</name>
+          <displayName>JournalNode</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/journalnode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>ZKFC</name>
+          <displayName>ZKFailoverController</displayName>
+          <category>SLAVE</category>
+          <!-- TODO: cardinality is conditional on HA topology -->
+          <cardinality>0+</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <commandScript>
+            <script>scripts/zkfc_slave.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop</name>
+            </package>
+            <package>
+              <name>hadoop-lzo</name>
+              <condition>should_install_lzo</condition>
+            </package>
+          </packages>
+        </osSpecific>
+
+        <osSpecific>
+          <osFamily>redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>lzo</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadoop-lzo-native</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadoop-libhdfs</name>
+            </package>
+            <package>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>libsnappy1</name>
+            </package>
+            <package>
+              <name>libsnappy-dev</name>
+            </package>
+            <package>
+              <name>liblzo2-2</name>
+              <condition>should_install_lzo</condition>
+            </package>
+            <package>
+              <name>hadoop-hdfs</name>
+            </package>
+            <package>
+              <name>libhdfs0</name>
+            </package>
+            <package>
+              <name>libhdfs0-dev</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+        <config-type>ssl-client</config-type>
+        <config-type>ssl-server</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
+
+    </service>
+  </services>
+</metainfo>
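
Note: the metainfo above wires each HDFS component to its command script, category,
and cardinality. A quick sketch (hypothetical local file path, illustration only)
for listing what a metainfo.xml declares:

  import xml.etree.ElementTree as ET

  root = ET.parse("metainfo.xml").getroot()
  for comp in root.iter("component"):
      print("%-20s %-8s %s" % (comp.findtext("name"),
                               comp.findtext("category"),
                               comp.findtext("cardinality")))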


[24/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/params.py
new file mode 100755
index 0000000..bc19704
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/params.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.functions import format
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.libraries.functions.default import default
+from utils import get_bare_principal
+
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.core.logger import Logger
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import get_kinit_path
+
+import status_params
+
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+stack_name = default("/hostLevelParams/stack_name", None)
+
+version = default("/commandParams/version", None)
+# Version that is CURRENT.
+current_version = default("/hostLevelParams/current_version", None)
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+
+# When downgrading the 'version' and 'current_version' are both pointing to the downgrade-target version
+# downgrade_from_version provides the source-version the downgrade is happening from
+downgrade_from_version = default("/commandParams/downgrade_from_version", None)
+
+# default kafka parameters
+kafka_home = '/usr/iop/current/kafka-broker'
+kafka_bin = kafka_home+'/bin/kafka'
+conf_dir = "/usr/iop/current/kafka-broker/config"
+
+kafka_user = config['configurations']['kafka-env']['kafka_user']
+kafka_log_dir = config['configurations']['kafka-env']['kafka_log_dir']
+kafka_pid_dir = status_params.kafka_pid_dir
+kafka_pid_file = kafka_pid_dir+"/kafka.pid"
+# This is hardcoded in the Kafka bash process lifecycle, over which we have no control.
+kafka_managed_pid_dir = "/var/run/kafka"
+kafka_managed_log_dir = "/var/log/kafka"
+hostname = config['hostname']
+user_group = config['configurations']['cluster-env']['user_group']
+java64_home = config['hostLevelParams']['java_home']
+kafka_env_sh_template = config['configurations']['kafka-env']['content']
+kafka_hosts = config['clusterHostInfo']['kafka_broker_hosts']
+kafka_hosts.sort()
+
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_hosts.sort()
+
+if (('kafka-log4j' in config['configurations']) and ('content' in config['configurations']['kafka-log4j'])):
+    log4j_props = config['configurations']['kafka-log4j']['content']
+else:
+    log4j_props = None
+
+kafka_metrics_reporters=""
+metric_collector_host = ""
+metric_collector_port = ""
+
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+has_metric_collector = not len(ams_collector_hosts) == 0
+
+if has_metric_collector:
+  metric_collector_host = ams_collector_hosts[0]
+  metric_collector_port = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+  if metric_collector_port and metric_collector_port.find(':') != -1:
+    metric_collector_port = metric_collector_port.split(':')[1]
+
+  if not len(kafka_metrics_reporters) == 0:
+      kafka_metrics_reporters = kafka_metrics_reporters + ','
+
+  kafka_metrics_reporters = kafka_metrics_reporters + "org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter"
+
+
+# Security-related params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+kafka_kerberos_enabled = ('security.inter.broker.protocol' in config['configurations']['kafka-broker'] and
+                          config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "SASL_PLAINTEXT")
+
+if security_enabled and iop_stack_version != "" and 'kafka_principal_name' in config['configurations']['kafka-env'] and compare_versions(iop_stack_version, '4.1') >= 0:
+    _hostname_lowercase = config['hostname'].lower()
+    _kafka_principal_name = config['configurations']['kafka-env']['kafka_principal_name']
+    kafka_jaas_principal = _kafka_principal_name.replace('_HOST',_hostname_lowercase)
+    kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab']
+    kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
+    kafka_kerberos_params = "-Djava.security.auth.login.config="+ conf_dir +"/kafka_jaas.conf"
+else:
+    kafka_kerberos_params = ''
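
Note: params.py resolves everything at import time from the command JSON via
Script.get_config() and default("/path", fallback). A rough sketch of the lookup
semantics those default(...) calls rely on (not the resource_management
implementation):

  def default_lookup(data, path, fallback=None):
      # Walk a "/a/b/c"-style path through nested dicts, returning the fallback
      # as soon as a segment is missing.
      node = data
      for part in path.strip("/").split("/"):
          if not isinstance(node, dict) or part not in node:
              return fallback
          node = node[part]
      return node

  command_json = {"commandParams": {"version": "4.2.5.0"}}
  print(default_lookup(command_json, "/commandParams/version"))            # 4.2.5.0
  print(default_lookup(command_json, "/commandParams/upgrade_direction"))  # None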

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/service_check.py
new file mode 100755
index 0000000..6f5fe72
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/service_check.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.validate import call_and_match_output
+from resource_management.libraries.functions.format import format
+from resource_management.core.logger import Logger
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    # TODO: Kafka was introduced in IOP 4.1 and only tentatively supports running in a kerberized cluster.
+    # Kafka uses its own Zookeeper instance and does not yet have the capability of running in a secure mode.
+    kafka_config = self.read_kafka_config()
+
+    create_topic_cmd_created_output = "Created topic \"ambari_kafka_service_check\"."
+    create_topic_cmd_exists_output = "Topic \"ambari_kafka_service_check\" already exists."
+
+    source_cmd = format("source {conf_dir}/kafka-env.sh")
+    create_topic_cmd = format("{kafka_home}/bin/kafka-topics.sh --zookeeper {kafka_config[zookeeper.connect]} --create --topic ambari_kafka_service_check --partitions 1 --replication-factor 1")
+    command = source_cmd + " ; " + create_topic_cmd
+
+    Logger.info("Running kafka create topic command: %s" % command)
+    call_and_match_output(command, format("({create_topic_cmd_created_output})|({create_topic_cmd_exists_output})"), "Failed to check that topic exists")
+
+  def read_kafka_config(self):
+    import params
+
+    kafka_config = {}
+    with open(params.conf_dir+"/server.properties", "r") as conf_file:
+      for line in conf_file:
+        if line.startswith("#") or not line.strip():
+          continue
+
+        # Split on the first '=' only so that values containing '=' survive intact.
+        key, value = line.split("=", 1)
+        kafka_config[key] = value.replace("\n","")
+
+    return kafka_config
+
+if __name__ == "__main__":
+    ServiceCheck().execute()
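
Note: the service check passes on both the first run ("Created topic ...") and
subsequent runs ("... already exists."), because call_and_match_output is given an
alternation of both messages. A quick standalone illustration of that pattern
(simplified, with dots escaped):

  import re

  pattern = (r'(Created topic "ambari_kafka_service_check"\.)'
             r'|(Topic "ambari_kafka_service_check" already exists\.)')
  for line in ('Created topic "ambari_kafka_service_check".',
               'Topic "ambari_kafka_service_check" already exists.'):
      print("%s  %s" % (bool(re.search(pattern, line)), line))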

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/status_params.py
new file mode 100755
index 0000000..57bdf5e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.functions import format
+from resource_management.libraries.script.script import Script
+
+config = Script.get_config()
+
+kafka_pid_dir = config['configurations']['kafka-env']['kafka_pid_dir']
+kafka_pid_file = format("{kafka_pid_dir}/kafka.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/upgrade.py
new file mode 100755
index 0000000..16fc526
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/upgrade.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management import *
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import Direction
+from resource_management.core.exceptions import Fail
+
+def run_migration(env, upgrade_type):
+  """
+  If the acl migration script is present, then run it for either upgrade or downgrade.
+  That script was introduced in HDP 2.3.4.0 and requires stopping all Kafka brokers first.
+  Requires configs to be present.
+  :param env: Environment.
+  :param upgrade_type: "rolling" or "nonrolling"
+  """
+  import params
+
+  if upgrade_type is None:
+    raise Fail('Parameter "upgrade_type" is missing.')
+
+  if params.upgrade_direction is None:
+    raise Fail('Parameter "upgrade_direction" is missing.')
+
+  if params.upgrade_direction == Direction.DOWNGRADE and params.downgrade_from_version is None:
+    raise Fail('Parameter "downgrade_from_version" is missing.')
+
+  if not params.security_enabled:
+    Logger.info("Skip running the Kafka ACL migration script since cluster security is not enabled.")
+    return
+
+  Logger.info("Upgrade type: {0}, direction: {1}".format(str(upgrade_type), params.upgrade_direction))
+
+  # If the acl migration script exists in the version being upgraded to, then attempt the upgrade/downgrade while still using the present bits.
+  kafka_acls_script = None
+  command_suffix = ""
+  if params.upgrade_direction == Direction.UPGRADE:
+    kafka_acls_script = format("/usr/hdp/{version}/kafka/bin/kafka-acls.sh")
+    command_suffix = "--upgradeAcls"
+  elif params.upgrade_direction == Direction.DOWNGRADE:
+    kafka_acls_script = format("/usr/hdp/{downgrade_from_version}/kafka/bin/kafka-acls.sh")
+    command_suffix = "--downgradeAcls"
+
+  if kafka_acls_script is not None:
+    if os.path.exists(kafka_acls_script):
+      Logger.info("Found Kafka acls script: {0}".format(kafka_acls_script))
+      if params.zookeeper_connect is None:
+        raise Fail("Could not retrieve property kafka-broker/zookeeper.connect")
+
+      acls_command = "{0} --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect={1} {2}".\
+        format(kafka_acls_script, params.zookeeper_connect, command_suffix)
+
+      Execute(acls_command,
+              user=params.kafka_user,
+              logoutput=True)
+    else:
+      Logger.info("Did not find Kafka acls script: {0}".format(kafka_acls_script))
+
+
+def prestart(env, component):
+  import params
+
+  if params.version and compare_versions(format_stack_version(params.version), '4.1.0.0') >= 0:
+    conf_select.select(params.stack_name, "kafka", params.version)
+    stack_select.select(component, params.version)
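
Note: prestart() gates the conf_select/stack_select calls on
compare_versions(format_stack_version(version), '4.1.0.0') >= 0. A plain-Python
sketch of that comparison semantics (not the resource_management implementation):

  def compare_versions(a, b):
      # Compare dotted versions numerically, padding the shorter one with zeros.
      pa = [int(x) for x in a.split(".")]
      pb = [int(x) for x in b.split(".")]
      width = max(len(pa), len(pb))
      pa += [0] * (width - len(pa))
      pb += [0] * (width - len(pb))
      return (pa > pb) - (pa < pb)

  print(compare_versions("4.2.5.0", "4.1.0.0"))  # 1  -> gate passes
  print(compare_versions("4.1", "4.1.0.0"))      # 0  -> still >= 0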

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/utils.py
new file mode 100755
index 0000000..2f1fa5e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/utils.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import re
+
+def get_bare_principal(normalized_principal_name):
+    """
+    Given a normalized principal name (nimbus/c6501.ambari.apache.org@EXAMPLE.COM) returns just the
+    primary component (nimbus)
+    :param normalized_principal_name: a string containing the principal name to process
+    :return: a string containing the primary component value or None if not valid
+    """
+
+    bare_principal = None
+
+    if normalized_principal_name:
+        match = re.match(r"([^/@]+)(?:/[^@])?(?:@.*)?", normalized_principal_name)
+
+        # Keep this check inside the block above so 'match' is always defined.
+        if match:
+            bare_principal = match.group(1)
+
+    return bare_principal
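
A few usage examples for get_bare_principal (the hostnames and realm below are
placeholders for illustration):

  from utils import get_bare_principal

  print(get_bare_principal("kafka/broker1.example.com@EXAMPLE.COM"))  # kafka
  print(get_bare_principal("ambari-qa@EXAMPLE.COM"))                  # ambari-qa
  print(get_bare_principal(None))                                     # None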

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/templates/kafka_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/templates/kafka_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/templates/kafka_jaas.conf.j2
new file mode 100755
index 0000000..56c558d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/templates/kafka_jaas.conf.j2
@@ -0,0 +1,41 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+KafkaServer {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{kafka_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="{{kafka_bare_jaas_principal}}"
+   principal="{{kafka_jaas_principal}}";
+};
+KafkaClient {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useTicketCache=true
+   renewTicket=true
+   serviceName="{{kafka_bare_jaas_principal}}";
+};
+Client {
+   com.sun.security.auth.module.Krb5LoginModule required
+   useKeyTab=true
+   keyTab="{{kafka_keytab_path}}"
+   storeKey=true
+   useTicketCache=false
+   serviceName="zookeeper"
+   principal="{{kafka_jaas_principal}}";
+};
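
Note: this template is normally rendered by the Kafka scripts with the
kafka_keytab_path / kafka_jaas_principal / kafka_bare_jaas_principal values computed
in params.py. A standalone rendering sketch (assumes jinja2 is installed; the values
below are placeholders):

  from jinja2 import Template

  with open("kafka_jaas.conf.j2") as f:
      template = Template(f.read())

  print(template.render(
      kafka_keytab_path="/etc/security/keytabs/kafka.service.keytab",
      kafka_jaas_principal="kafka/broker1.example.com@EXAMPLE.COM",
      kafka_bare_jaas_principal="kafka"))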

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/configuration/kerberos-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/configuration/kerberos-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/configuration/kerberos-env.xml
new file mode 100755
index 0000000..1a70ac0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/configuration/kerberos-env.xml
@@ -0,0 +1,308 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property require-input="true">
+    <name>kdc_type</name>
+    <display-name>KDC type</display-name>
+    <value>mit-kdc</value>
+    <description>
+      The type of KDC being used. Either mit-kdc or active-directory
+    </description>
+    <value-attributes>
+      <type>componentHost</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>manage_identities</name>
+    <description>
+      Indicates whether the Ambari user and service Kerberos identities (principals and keytab files)
+      should be managed (created, deleted, updated, etc...) by Ambari or managed manually.
+    </description>
+    <value>true</value>
+    <display-name>Manage Kerberos Identities</display-name>
+    <value-attributes>
+      <visible>false</visible>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>install_packages</name>
+    <display-name>Install OS-specific Kerberos client package(s)</display-name>
+    <description>
+      Indicates whether Ambari should install the Kerberos client package(s) or not. If not, it is
+      expected that Kerberos utility programs (such as kadmin, kinit, klist, and kdestroy) are
+      compatible with MIT Kerberos 5 version 1.10.3 in command line options and behaviors.
+    </description>
+    <value>true</value>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property require-input="true">
+    <name>ldap_url</name>
+    <display-name>LDAP url</display-name>
+    <description>
+      The URL to the Active Directory LDAP Interface
+    </description>
+    <value/>
+    <value-attributes>
+      <type>host</type>
+      <visible>false</visible>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property require-input="true">
+    <name>container_dn</name>
+    <display-name>Container DN</display-name>
+    <description>
+      The distinguished name (DN) of the container used to store service principals
+    </description>
+    <value-attributes>
+      <visible>false</visible>
+      <overridable>false</overridable>
+    </value-attributes>
+    <value/>
+  </property>
+
+  <property require-input="true">
+    <name>encryption_types</name>
+    <display-name>Encryption Types</display-name>
+    <description>
+      The supported list of session key encryption types that should be returned by the KDC.
+    </description>
+    <value>aes des3-cbc-sha1 rc4 des-cbc-md5</value>
+    <value-attributes>
+      <type>multiLine</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property require-input="true">
+    <name>realm</name>
+    <description>
+      The default realm to use when creating service principals
+    </description>
+    <display-name>Realm name</display-name>
+    <value/>
+    <value-attributes>
+      <type>host</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property require-input="true">
+    <name>kdc_hosts</name>
+    <description>
+      A comma-delimited list of IP addresses or FQDNs declaring the KDC hosts.
+      Optionally a port number may be included in each entry by separating each host and port by a
+      colon (:). Example:  kdc1.example.com:88, kdc2.example.com:88
+    </description>
+    <display-name>KDC hosts</display-name>
+    <value/>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>admin_server_host</name>
+    <display-name>Kadmin host</display-name>
+    <description>
+      The IP address or FQDN for the KDC Kerberos administrative host. Optionally a port number may be included.
+    </description>
+    <value/>
+    <value-attributes>
+      <type>host</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>executable_search_paths</name>
+    <display-name>Executable Search Paths</display-name>
+    <description>
+      A comma-delimited list of search paths to use to find Kerberos utilities like kadmin and kinit.
+    </description>
+    <value>/usr/bin, /usr/kerberos/bin, /usr/sbin, /usr/lib/mit/bin, /usr/lib/mit/sbin</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>multiLine</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>password_length</name>
+    <display-name>Password Length</display-name>
+    <description>
+      The required length for generated passwords.
+    </description>
+    <value>20</value>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>password_min_lowercase_letters</name>
+    <display-name>Password Minimum # Lowercase Letters</display-name>
+    <description>
+      The minimum number of lowercase letters (a-z) required in generated passwords
+    </description>
+    <value>1</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>password_min_uppercase_letters</name>
+    <display-name>Password Minimum # Uppercase Letters</display-name>
+    <description>
+      The minimum number of uppercase letters (A-Z) required in generated passwords
+    </description>
+    <value>1</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>password_min_digits</name>
+    <display-name>Password Minimum # Digits</display-name>
+    <description>
+      The minimum number of digits (0-9) required in generated passwords
+    </description>
+    <value>1</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>password_min_punctuation</name>
+    <display-name>Password Minimum # Punctuation Characters</display-name>
+    <description>
+      The minimum number of punctuation characters (?.!$%^*()-_+=~) required in generated passwords
+    </description>
+    <value>1</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>password_min_whitespace</name>
+    <display-name>Password Minimum # Whitespace Characters</display-name>
+    <description>
+      The minimum number of whitespace characters required in generated passwords
+    </description>
+    <value>0</value>
+    <value-attributes>
+      <type>int</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>service_check_principal_name</name>
+    <display-name>Test Kerberos Principal</display-name>
+    <description>
+      The principal name to use when executing the Kerberos service check
+    </description>
+    <value>${cluster_name}-${short_date}</value>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>case_insensitive_username_rules</name>
+    <display-name>Enable case insensitive username rules</display-name>
+    <description>
+      Force principal names to resolve to lowercase local usernames in auth-to-local rules
+    </description>
+    <value>false</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>ad_create_attributes_template</name>
+    <display-name>Account Attribute Template</display-name>
+    <description>
+      A Velocity template to use to generate a JSON-formatted document containing the set of
+      attribute names and values needed to create a new Kerberos identity in the relevant
+      Active Directory.
+      Variables include:
+      principal_name, principal_primary, principal_instance, realm, realm_lowercase,
+      normalized_principal, principal digest, password, is_service, container_dn
+    </description>
+    <value>
+{
+  "objectClass": ["top", "person", "organizationalPerson", "user"],
+  "cn": "$principal_name",
+  #if( $is_service )
+  "servicePrincipalName": "$principal_name",
+  #end
+  "userPrincipalName": "$normalized_principal",
+  "unicodePwd": "$password",
+  "accountExpires": "0",
+  "userAccountControl": "66048"
+}
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>kdc_create_attributes</name>
+    <display-name>Principal Attributes</display-name>
+    <description>
+      The set of attributes to use when creating a new Kerberos identity in the relevant (MIT) KDC.
+    </description>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+</configuration>
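
Note: the password_length and password_min_* properties above describe the policy
for generated passwords. A small illustrative generator that honors those minimums
(sketch only, not Ambari's implementation):

  import random
  import string

  def generate_password(length=20, lowercase=1, uppercase=1, digits=1,
                        punctuation=1, whitespace=0):
      # One pool per policy knob, each with its required minimum count.
      pools = [(string.ascii_lowercase, lowercase),
               (string.ascii_uppercase, uppercase),
               (string.digits, digits),
               ("?.!$%^*()-_+=~", punctuation),
               (" ", whitespace)]
      chars = []
      for pool, minimum in pools:
          chars.extend(random.choice(pool) for _ in range(minimum))
      # Pad to the requested length with alphanumerics, then shuffle.
      filler = string.ascii_letters + string.digits
      chars.extend(random.choice(filler) for _ in range(length - len(chars)))
      random.shuffle(chars)
      return "".join(chars)

  print(generate_password())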

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/configuration/krb5-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/configuration/krb5-conf.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/configuration/krb5-conf.xml
new file mode 100755
index 0000000..ce5d4b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/configuration/krb5-conf.xml
@@ -0,0 +1,109 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property require-input="false">
+    <name>domains</name>
+    <display-name>Domains</display-name>
+    <description>
+      A comma-separated list of domain names used to map server host names to the Realm name (e.g. .example.com,example.com). This is optional.
+    </description>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>manage_krb5_conf</name>
+    <display-name>Manage Kerberos client krb5.conf</display-name>
+    <description>
+      Indicates whether your krb5.conf file should be managed by the wizard or whether you will manage it yourself
+    </description>
+    <value>true</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>conf_dir</name>
+    <display-name>krb5-conf directory path</display-name>
+    <description>The krb5.conf configuration directory</description>
+    <value>/etc</value>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>krb5-conf template</display-name>
+    <description>Customizable krb5.conf template (Jinja template engine)</description>
+    <value>
+[libdefaults]
+  renew_lifetime = 7d
+  forwardable = true
+  default_realm = {{realm}}
+  ticket_lifetime = 24h
+  dns_lookup_realm = false
+  dns_lookup_kdc = false
+  #default_tgs_enctypes = {{encryption_types}}
+  #default_tkt_enctypes = {{encryption_types}}
+
+{% if domains %}
+[domain_realm]
+{% for domain in domains.split(',') %}
+  {{domain}} = {{realm}}
+{% endfor %}
+{% endif %}
+
+[logging]
+  default = FILE:/var/log/krb5kdc.log
+  admin_server = FILE:/var/log/kadmind.log
+  kdc = FILE:/var/log/krb5kdc.log
+
+[realms]
+  {{realm}} = {
+{%- if kdc_hosts &gt; 0 -%}
+{%- set kdc_host_list = kdc_hosts.split(',')  -%}
+{%- if kdc_host_list and kdc_host_list|length &gt; 0 %}
+    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}
+{%- if kdc_host_list -%}
+{% for kdc_host in kdc_host_list %}
+    kdc = {{kdc_host|trim()}}
+{%- endfor -%}
+{% endif %}
+{%- endif %}
+{%- endif %}
+  }
+
+{# Append additional realm declarations below #}
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+</configuration>
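
Note: to illustrate how the krb5-conf 'content' template above expands, the following
minimal sketch renders a simplified stand-in for its [realms] block with the jinja2
library. The realm and KDC host names are hypothetical, and the stand-in covers only
the admin_server/kdc logic; the stack template additionally handles the optional
[domain_realm] section and Jinja whitespace control.

    from jinja2 import Template

    # Simplified stand-in for the [realms] portion of the krb5.conf template above.
    REALMS_TEMPLATE = (
        "[realms]\n"
        "  {{ realm }} = {\n"
        "    admin_server = {{ admin_server_host or kdc_hosts.split(',')[0] | trim }}\n"
        "{% for kdc_host in kdc_hosts.split(',') %}"
        "    kdc = {{ kdc_host | trim }}\n"
        "{% endfor %}"
        "  }\n"
    )

    print(Template(REALMS_TEMPLATE).render(
        realm="EXAMPLE.COM",
        kdc_hosts="kdc1.example.com, kdc2.example.com",
        admin_server_host=None,
    ))

which prints:

    [realms]
      EXAMPLE.COM = {
        admin_server = kdc1.example.com
        kdc = kdc1.example.com
        kdc = kdc2.example.com
      }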

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/kerberos.json
new file mode 100755
index 0000000..6ab7610
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/kerberos.json
@@ -0,0 +1,17 @@
+{
+  "services": [
+    {
+      "name": "KERBEROS",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "KERBEROS_CLIENT"
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/metainfo.xml
new file mode 100755
index 0000000..a9b6ca2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/metainfo.xml
@@ -0,0 +1,147 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KERBEROS</name>
+      <displayName>Kerberos</displayName>
+      <comment>A computer network authentication protocol which works on
+        the basis of 'tickets' to allow nodes communicating over a
+        non-secure network to prove their identity to one another in a
+        secure manner.
+      </comment>
+      <version>1.10.3</version>
+
+      <components>
+        <component>
+          <name>KERBEROS_CLIENT</name>
+          <displayName>Kerberos Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>ALL</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <auto-deploy>
+            <enabled>true</enabled>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/kerberos_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>SET_KEYTAB</name>
+              <commandScript>
+                <script>scripts/kerberos_client.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>1000</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>REMOVE_KEYTAB</name>
+              <commandScript>
+                <script>scripts/kerberos_client.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>1000</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>krb5.conf</fileName>
+              <dictionaryName>krb5-conf</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,redhat6</osFamily>
+          <packages>
+            <package>
+              <name>krb5-server</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+            <package>
+              <name>krb5-libs</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+            <package>
+              <name>krb5-workstation</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+          </packages>
+        </osSpecific>
+
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14</osFamily>
+          <packages>
+            <package>
+              <name>krb5-kdc</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+            <package>
+              <name>krb5-admin-server</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+            <package>
+              <name>krb5-user</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+            <package>
+              <name>krb5-config</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+          </packages>
+        </osSpecific>
+
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>krb5</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+            <package>
+              <name>krb5-client</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+            <package>
+              <name>krb5-server</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>krb5-conf</config-type>
+        <config-type>kerberos-env</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterChange>true</restartRequiredAfterChange>
+    </service>
+  </services>
+</metainfo>
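
Note: the SET_KEYTAB and REMOVE_KEYTAB entries above are component-level custom
commands. The sketch below shows the generic Ambari REST pattern for issuing such a
command against the KERBEROS_CLIENT component; the server URL, cluster name, host
name, and credentials are hypothetical. In practice the Ambari server issues these
commands itself during the Kerberos wizard and supplies the keytab payload via
kerberosCommandParams, so this is illustrative of the request shape only.

    import base64
    import json
    import urllib2

    body = {
        "RequestInfo": {"context": "Push keytabs", "command": "SET_KEYTAB"},
        "Requests/resource_filters": [{
            "service_name": "KERBEROS",
            "component_name": "KERBEROS_CLIENT",
            "hosts": "c7401.ambari.apache.org",
        }],
    }

    request = urllib2.Request("http://ambari.example.com:8080/api/v1/clusters/c1/requests",
                              data=json.dumps(body))
    request.add_header("X-Requested-By", "ambari")
    request.add_header("Authorization", "Basic " + base64.b64encode("admin:admin"))
    print(urllib2.urlopen(request).read())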

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_client.py
new file mode 100755
index 0000000..8e2fa93
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_client.py
@@ -0,0 +1,79 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from kerberos_common import *
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+
+class KerberosClient(KerberosScript):
+  def install(self, env):
+    install_packages = default('/configurations/kerberos-env/install_packages', "true")
+    if install_packages:
+      self.install_packages(env)
+    else:
+      print "Kerberos client packages are not being installed, manual installation is required."
+
+    self.configure(env)
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    if params.manage_krb5_conf:
+      self.write_krb5_conf()
+    #delete krb cache to prevent using old krb tickets on fresh kerberos setup
+    self.clear_tmp_cache()
+
+    #self.setup_jce()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def security_status(self, env):
+    import status_params
+    if status_params.security_enabled:
+      if status_params.smoke_user and status_params.smoke_user_keytab:
+        try:
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.smoke_user,
+                                status_params.smoke_user_keytab,
+                                status_params.smoke_user_principal,
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        self.put_structured_out({"securityState": "UNKNOWN"})
+        self.put_structured_out({"securityStateErrorInfo": "Missing smoke user credentials"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+  def set_keytab(self, env):
+    self.write_keytab_file()
+
+  def remove_keytab(self, env):
+    self.delete_keytab_file()
+
+  #def download_install_jce(self, env):
+  #  self.setup_jce()
+
+
+if __name__ == "__main__":
+  KerberosClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_common.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_common.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_common.py
new file mode 100755
index 0000000..a05aead
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_common.py
@@ -0,0 +1,473 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import base64
+import os
+import string
+import subprocess
+import sys
+import tempfile
+from tempfile import gettempdir
+
+from resource_management import *
+from utils import get_property_value
+from ambari_commons.os_utils import remove_file
+from ambari_agent import Constants
+
+class KerberosScript(Script):
+  KRB5_REALM_PROPERTIES = [
+    'kdc',
+    'admin_server',
+    'default_domain',
+    'master_kdc'
+  ]
+
+  KRB5_SECTION_NAMES = [
+    'libdefaults',
+    'logging',
+    'realms',
+    'domain_realm',
+    'capaths',
+    'ca_paths',
+    'appdefaults',
+    'plugins'
+  ]
+
+  @staticmethod
+  def create_random_password():
+    import random
+
+    chars = string.digits + string.ascii_letters
+    return ''.join(random.choice(chars) for x in range(13))
+
+  @staticmethod
+  def write_conf_section(output_file, section_name, section_data):
+    if section_name is not None:
+      output_file.write('[%s]\n' % section_name)
+
+      if section_data is not None:
+        for key, value in section_data.iteritems():
+          output_file.write(" %s = %s\n" % (key, value))
+
+
+  @staticmethod
+  def _write_conf_realm(output_file, realm_name, realm_data):
+    """ Writes out realm details
+
+    Example:
+
+     EXAMPLE.COM = {
+      kdc = kerberos.example.com
+      admin_server = kerberos.example.com
+     }
+
+    """
+    if realm_name is not None:
+      output_file.write(" %s = {\n" % realm_name)
+
+      if realm_data is not None:
+        for key, value in realm_data.iteritems():
+          if key in KerberosScript.KRB5_REALM_PROPERTIES:
+            output_file.write("  %s = %s\n" % (key, value))
+
+      output_file.write(" }\n")
+
+  @staticmethod
+  def write_conf_realms_section(output_file, section_name, realms_data):
+    if section_name is not None:
+      output_file.write('[%s]\n' % section_name)
+
+      if realms_data is not None:
+        for realm, realm_data in realms_data.iteritems():
+          KerberosScript._write_conf_realm(output_file, realm, realm_data)
+          output_file.write('\n')
+
+  @staticmethod
+  def write_krb5_conf():
+    import params
+
+    Directory(params.krb5_conf_dir,
+              owner='root',
+              create_parents=True,
+              group='root',
+              mode=0755
+    )
+
+    if (params.krb5_conf_template is None) or not params.krb5_conf_template.strip():
+      content = Template('krb5_conf.j2')
+    else:
+      content = InlineTemplate(params.krb5_conf_template)
+
+    File(params.krb5_conf_path,
+         content=content,
+         owner='root',
+         group='root',
+         mode=0644
+    )
+
+  @staticmethod
+  def invoke_kadmin(query, admin_identity=None, default_realm=None):
+    """
+    Executes the kadmin or kadmin.local command (depending on whether admin_identity is set or not)
+    and returns the command result code and standard output data.
+
+    :param query: the kadmin query to execute
+    :param admin_identity: the identity for the administrative user (optional)
+    :param default_realm: the default realm to assume
+    :return: return_code, out
+    """
+    if (query is not None) and (len(query) > 0):
+      auth_principal = None
+      auth_keytab_file = None
+
+      if admin_identity is not None:
+        auth_principal = get_property_value(admin_identity, 'principal')
+
+      if auth_principal is None:
+        kadmin = 'kadmin.local'
+        credential = ''
+      else:
+        kadmin = 'kadmin -p "%s"' % auth_principal
+
+        auth_password = get_property_value(admin_identity, 'password')
+
+        if auth_password is None:
+          auth_keytab = get_property_value(admin_identity, 'keytab')
+
+          if auth_keytab is not None:
+            (fd, auth_keytab_file) = tempfile.mkstemp()
+            os.write(fd, base64.b64decode(auth_keytab))
+            os.close(fd)
+
+          credential = '-k -t %s' % auth_keytab_file
+        else:
+          credential = '-w "%s"' % auth_password
+
+      if (default_realm is not None) and (len(default_realm) > 0):
+        realm = '-r %s' % default_realm
+      else:
+        realm = ''
+
+      try:
+        command = '%s %s %s -q "%s"' % (kadmin, credential, realm, query.replace('"', '\\"'))
+        return shell.checked_call(command)
+      except:
+        raise
+      finally:
+        if auth_keytab_file is not None:
+          os.remove(auth_keytab_file)
+
+  @staticmethod
+  def create_keytab_file(principal, path, auth_identity=None):
+    success = False
+
+    if (principal is not None) and (len(principal) > 0):
+      if (auth_identity is None) or (len(auth_identity) == 0):
+        norandkey = '-norandkey'
+      else:
+        norandkey = ''
+
+      if (path is not None) and (len(path) > 0):
+        keytab_file = '-k %s' % path
+      else:
+        keytab_file = ''
+
+      try:
+        result_code, output = KerberosScript.invoke_kadmin(
+          'ktadd %s %s %s' % (keytab_file, norandkey, principal),
+          auth_identity)
+
+        success = (result_code == 0)
+      except:
+        raise Fail("Failed to create keytab for principal: %s (in %s)" % (principal, path))
+
+    return success
+
+  @staticmethod
+  def create_keytab(principal, auth_identity=None):
+    keytab = None
+
+    (fd, temp_path) = tempfile.mkstemp()
+    os.remove(temp_path)
+
+    try:
+      if KerberosScript.create_keytab_file(principal, temp_path, auth_identity):
+        with open(temp_path, 'r') as f:
+          keytab = base64.b64encode(f.read())
+    finally:
+      if os.path.isfile(temp_path):
+        os.remove(temp_path)
+
+    return keytab
+
+  @staticmethod
+  def principal_exists(identity, auth_identity=None):
+    exists = False
+
+    if identity is not None:
+      principal = get_property_value(identity, 'principal')
+
+      if (principal is not None) and (len(principal) > 0):
+        try:
+          result_code, output = KerberosScript.invoke_kadmin('getprinc %s' % principal,
+                                                             auth_identity)
+          exists = (output is not None) and (("Principal: %s" % principal) in output)
+        except:
+          raise Fail("Failed to determine if principal exists: %s" % principal)
+
+    return exists
+
+  @staticmethod
+  def change_principal_password(identity, auth_identity=None):
+    success = False
+
+    if identity is not None:
+      principal = get_property_value(identity, 'principal')
+
+      if (principal is not None) and (len(principal) > 0):
+        password = get_property_value(identity, 'password')
+
+        if password is None:
+          credentials = '-randkey'
+        else:
+          credentials = '-pw "%s"' % password
+
+        try:
+          result_code, output = KerberosScript.invoke_kadmin(
+            'change_password %s %s' % (credentials, principal),
+            auth_identity)
+
+          success = (result_code == 0)
+        except:
+          raise Fail("Failed to create principal: %s" % principal)
+
+    return success
+
+  @staticmethod
+  def create_principal(identity, auth_identity=None):
+    success = False
+
+    if identity is not None:
+      principal = get_property_value(identity, 'principal')
+
+      if (principal is not None) and (len(principal) > 0):
+        password = get_property_value(identity, 'password')
+
+        if password is None:
+          credentials = '-randkey'
+        else:
+          credentials = '-pw "%s"' % password
+
+        try:
+          result_code, out = KerberosScript.invoke_kadmin(
+            'addprinc %s %s' % (credentials, principal),
+            auth_identity)
+
+          success = (result_code == 0)
+        except:
+          raise Fail("Failed to create principal: %s" % principal)
+
+    return success
+
+  @staticmethod
+  def clear_tmp_cache():
+    tmp_dir = Constants.AGENT_TMP_DIR
+    if tmp_dir is None:
+      tmp_dir = gettempdir()
+    curl_krb_cache_path = os.path.join(tmp_dir, "curl_krb_cache")
+    Directory(curl_krb_cache_path, action="delete")
+
+  @staticmethod
+  def create_principals(identities, auth_identity=None):
+    if identities is not None:
+      for identity in identities:
+        KerberosScript.create_principal(identity, auth_identity)
+
+  @staticmethod
+  def create_or_update_administrator_identity():
+    import params
+
+    if params.realm is not None:
+      admin_identity = params.get_property_value(params.realm, 'admin_identity')
+
+      if KerberosScript.principal_exists(admin_identity):
+        KerberosScript.change_principal_password(admin_identity)
+      else:
+        KerberosScript.create_principal(admin_identity)
+
+  @staticmethod
+  def test_kinit(identity, user=None):
+    principal = get_property_value(identity, 'principal')
+    kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+    kdestroy_path_local = functions.get_kdestroy_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+    if principal is not None:
+      keytab_file = get_property_value(identity, 'keytab_file')
+      keytab = get_property_value(identity, 'keytab')
+      password = get_property_value(identity, 'password')
+
+      # If a test keytab file is available, simply use it
+      if (keytab_file is not None) and (os.path.isfile(keytab_file)):
+        command = '%s -k -t %s %s' % (kinit_path_local, keytab_file, principal)
+        Execute(command,
+          user = user,
+        )
+        return shell.checked_call(kdestroy_path_local)
+
+      # If base64-encoded test keytab data is available, decode it, write it to a temporary file,
+      # use it, and then remove the temporary file
+      elif keytab is not None:
+        (fd, test_keytab_file) = tempfile.mkstemp()
+        os.write(fd, base64.b64decode(keytab))
+        os.close(fd)
+
+        try:
+          command = '%s -k -t %s %s' % (kinit_path_local, test_keytab_file, principal)
+          Execute(command,
+            user = user,
+          )
+          return shell.checked_call(kdestroy_path_local)
+        except:
+          raise
+        finally:
+          if test_keytab_file is not None:
+            os.remove(test_keytab_file)
+
+      # If no keytab data is available and a password was supplied, simply use it.
+      elif password is not None:
+        process = subprocess.Popen([kinit_path_local, principal], stdin=subprocess.PIPE)
+        stdout, stderr = process.communicate(password)
+        if process.returncode:
+          err_msg = Logger.filter_text("Execution of kinit returned %d. %s" % (process.returncode, stderr))
+          raise Fail(err_msg)
+        else:
+          return shell.checked_call(kdestroy_path_local)
+      else:
+        return 0, ''
+    else:
+      return 0, ''
+
+
+  def write_keytab_file(self):
+    import params
+    import stat
+
+    if params.kerberos_command_params is not None:
+      for item  in params.kerberos_command_params:
+        keytab_content_base64 = get_property_value(item, 'keytab_content_base64')
+        if (keytab_content_base64 is not None) and (len(keytab_content_base64) > 0):
+          keytab_file_path = get_property_value(item, 'keytab_file_path')
+          if (keytab_file_path is not None) and (len(keytab_file_path) > 0):
+            head, tail = os.path.split(keytab_file_path)
+            if head:
+              Directory(head, create_parents=True, mode=0755, owner="root", group="root")
+
+            owner = get_property_value(item, 'keytab_file_owner_name')
+            owner_access = get_property_value(item, 'keytab_file_owner_access')
+            group = get_property_value(item, 'keytab_file_group_name')
+            group_access = get_property_value(item, 'keytab_file_group_access')
+            mode = 0
+
+            if owner_access == 'rw':
+              mode |= stat.S_IREAD | stat.S_IWRITE
+            else:
+              mode |= stat.S_IREAD
+
+            if group_access == 'rw':
+              mode |= stat.S_IRGRP | stat.S_IWGRP
+            elif group_access == 'r':
+              mode |= stat.S_IRGRP
+
+            keytab_content = base64.b64decode(keytab_content_base64)
+
+            # to hide content in command output
+            def make_lambda(data):
+              return lambda: data
+
+            File(keytab_file_path,
+                 content=make_lambda(keytab_content),
+                 mode=mode,
+                 owner=owner,
+                 group=group)
+
+            principal = get_property_value(item, 'principal')
+            if principal is not None:
+              curr_content = Script.structuredOut
+
+              if "keytabs" not in curr_content:
+                curr_content['keytabs'] = {}
+
+              curr_content['keytabs'][principal.replace("_HOST", params.hostname)] = keytab_file_path
+
+              self.put_structured_out(curr_content)
+
+  def delete_keytab_file(self):
+    import params
+
+    if params.kerberos_command_params is not None:
+      for item in params.kerberos_command_params:
+        keytab_file_path = get_property_value(item, 'keytab_file_path')
+        if (keytab_file_path is not None) and (len(keytab_file_path) > 0):
+
+          # Delete the keytab file
+          File(keytab_file_path, action="delete")
+
+          principal = get_property_value(item, 'principal')
+          if principal is not None:
+            curr_content = Script.structuredOut
+
+            if "keytabs" not in curr_content:
+              curr_content['keytabs'] = {}
+
+            curr_content['keytabs'][principal.replace("_HOST", params.hostname)] = '_REMOVED_'
+
+            self.put_structured_out(curr_content)
+
+  def setup_jce(self):
+    import params
+
+    if not params.jdk_name:
+      return
+    jce_curl_target = None
+    if params.jce_policy_zip is not None:
+      jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
+      Directory(params.artifact_dir,
+                create_parents = True,
+                )
+      File(jce_curl_target,
+           content = DownloadSource(format("{jce_location}/{jce_policy_zip}")),
+           )
+    elif params.security_enabled:
+      # Security is enabled but no JCE policy zip was provided; this is an inconsistent configuration
+      raise Fail("Security is enabled, but JCE policy zip is not specified.")
+
+    # The extraction will occur only after the security flag is set
+    if params.security_enabled:
+      security_dir = format("{java_home}/jre/lib/security")
+
+      File([format("{security_dir}/US_export_policy.jar"), format("{security_dir}/local_policy.jar")],
+           action = "delete",
+           )
+
+      extract_cmd = ("unzip", "-o", "-j", "-q", jce_curl_target, "-d", security_dir)
+      Execute(extract_cmd,
+              only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
+              path = ['/bin/','/usr/bin'],
+              sudo = True
+      )
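
Note: write_keytab_file() above derives the keytab file mode from the requested owner
and group access strings using stat flags. A minimal worked example with hypothetical
access values ('rw' for the owner, 'r' for the group):

    import stat

    # Mirrors the mode computation in write_keytab_file() for owner_access = 'rw'
    # and group_access = 'r'.
    mode = 0
    mode |= stat.S_IREAD | stat.S_IWRITE   # owner: read + write
    mode |= stat.S_IRGRP                   # group: read only
    print(oct(mode))                       # 0640 on Python 2 (0o640 on Python 3)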

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_server.py
new file mode 100755
index 0000000..b98b265
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/kerberos_server.py
@@ -0,0 +1,141 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from kerberos_common import *
+from ambari_commons.os_check import OSCheck
+
+class KerberosServer(KerberosScript):
+  @staticmethod
+  def write_kadm5_acl():
+    import params
+
+    Directory(params.kadm5_acl_dir,
+              owner='root',
+              create_parents=True,
+              group='root',
+              mode=0700
+    )
+
+    if (params.kadm5_acl_template is None) or not params.kadm5_acl_template.strip():
+      content = Template('kadm5_acl.j2')
+    else:
+      content = InlineTemplate(params.kadm5_acl_template)
+
+    File(params.kadm5_acl_path,
+         content=content,
+         owner='root',
+         group='root',
+         mode=0600
+    )
+
+  @staticmethod
+  def write_kdc_conf():
+    import params
+
+    Directory(params.kdc_conf_dir,
+              owner='root',
+              create_parents=True,
+              group='root',
+              mode=0700
+    )
+
+    if (params.kdc_conf_template is None) or not params.kdc_conf_template.strip():
+      content = Template('kdc_conf.j2')
+    else:
+      content = InlineTemplate(params.kdc_conf_template)
+
+    File(params.kdc_conf_path,
+         content=content,
+         owner='root',
+         group='root',
+         mode=0600
+    )
+
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    self.configure(env)
+
+    # Create the Kerberos database (only on install, for now)
+    Execute(
+      "%s create -s -P '%s'" % (params.kdb5_util_path, KerberosScript.create_random_password()))
+
+    # Create or update the administrator account
+    KerberosScript.create_or_update_administrator_identity()
+
+
+  def start(self, env):
+    # Attempt to reconfigure the service before starting
+    self.configure(env)
+
+    # Create or update the administrator account
+    KerberosScript.create_or_update_administrator_identity()
+
+    if OSCheck.is_suse_family():
+      Execute('rckadmind start')
+      Execute('rckrb5kdc start')
+    elif OSCheck.is_ubuntu_family():
+      Execute('service krb5-kdc start')
+      Execute('service krb5-admin-server start')
+    else:
+      Execute('service krb5kdc start')
+      Execute('service kadmin start')
+
+  def stop(self, env):
+    if OSCheck.is_suse_family():
+      Execute('rckadmind stop')
+      Execute('rckrb5kdc stop')
+    elif OSCheck.is_ubuntu_family():
+      Execute('service krb5-kdc stop')
+      Execute('service krb5-admin-server stop')
+    else:
+      Execute('service krb5kdc stop')
+      Execute('service kadmin stop')
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    KerberosServer.write_krb5_conf()
+    KerberosServer.write_kdc_conf()
+    KerberosServer.write_kadm5_acl()
+
+  def status(self, env):
+    import params
+
+    if OSCheck.is_suse_family():
+      try:
+        Execute('checkproc `which krb5kdc`')
+        Execute('checkproc `which kadmind`')
+      except Fail as ex:
+        raise ComponentIsNotRunning()
+
+    elif OSCheck.is_ubuntu_family():
+      check_process_status(params.kadmin_pid_path)
+      check_process_status(params.krb5kdc_pid_path)
+
+    else:
+      check_process_status(params.kadmin_pid_path)
+      check_process_status(params.krb5kdc_pid_path)
+
+
+if __name__ == "__main__":
+  KerberosServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/params.py
new file mode 100755
index 0000000..03f208e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/params.py
@@ -0,0 +1,200 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import get_property_value, get_unstructured_data
+from ambari_commons.os_check import OSCheck
+
+krb5_conf_dir = '/etc'
+krb5_conf_file = 'krb5.conf'
+krb5_conf_path = krb5_conf_dir + '/' + krb5_conf_file
+
+if OSCheck.is_suse_family():
+  kdc_conf_dir = '/var/lib/kerberos/krb5kdc'
+elif OSCheck.is_ubuntu_family():
+  kdc_conf_dir = '/etc/krb5kdc'
+else:
+  kdc_conf_dir = '/var/kerberos/krb5kdc'
+kdc_conf_file = 'kdc.conf'
+kdc_conf_path = kdc_conf_dir + '/' + kdc_conf_file
+
+kadm5_acl_dir = kdc_conf_dir  # Typically kadm5.acl and kdc.conf exist in the same directory
+kadm5_acl_file = 'kadm5.acl'
+kadm5_acl_path = kadm5_acl_dir + '/' + kadm5_acl_file
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+configurations = None
+keytab_details = None
+default_group = None
+kdc_server_host = None
+cluster_host_info = None
+
+hostname = config['hostname']
+
+kdb5_util_path = 'kdb5_util'
+
+kadmin_pid_path = '/var/run/kadmind.pid'
+krb5kdc_pid_path = '/var/run/krb5kdc.pid'
+
+smoke_test_principal = None
+smoke_test_keytab_file = None
+
+smoke_user = 'ambari-qa'
+
+manage_identities = 'true'
+
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = config['hostLevelParams']['java_home']
+java_version = int(config['hostLevelParams']['java_version'])
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+if config is not None:
+  kerberos_command_params = get_property_value(config, 'kerberosCommandParams')
+
+  cluster_host_info = get_property_value(config, 'clusterHostInfo')
+  if cluster_host_info is not None:
+    kdc_server_hosts = get_property_value(cluster_host_info, 'kdc_server_hosts')
+
+    if (kdc_server_hosts is not None) and (len(kdc_server_hosts) > 0):
+      kdc_server_host = kdc_server_hosts[0]
+
+  configurations = get_property_value(config, 'configurations')
+  if configurations is not None:
+    cluster_env = get_property_value(configurations, 'cluster-env')
+
+    if cluster_env is not None:
+      smoke_test_principal = get_property_value(cluster_env, 'smokeuser_principal_name', None, True, None)
+      smoke_test_keytab_file = get_property_value(cluster_env, 'smokeuser_keytab', None, True, None)
+      smoke_user = get_property_value(cluster_env, 'smokeuser', smoke_user, True, smoke_user)
+
+      default_group = get_property_value(cluster_env, 'user_group')
+
+      if default_group is None:
+        default_group = get_property_value(cluster_env, 'user-group')
+
+    # ##############################################################################################
+    # Get krb5.conf template data
+    # ##############################################################################################
+    realm = 'EXAMPLE.COM'
+    domains = ''
+    kdc_hosts = 'localhost'
+    admin_server_host = None
+    admin_principal = None
+    admin_password = None
+    admin_keytab = None
+    test_principal = None
+    test_password = None
+    test_keytab = None
+    test_keytab_file = None
+    encryption_types = None
+    manage_krb5_conf = "true"
+    krb5_conf_template = None
+
+    krb5_conf_data = get_property_value(configurations, 'krb5-conf')
+
+    kerberos_env = get_property_value(configurations, "kerberos-env")
+
+    if kerberos_env is not None:
+      manage_identities = get_property_value(kerberos_env, "manage_identities", "true", True, "true")
+      encryption_types = get_property_value(kerberos_env, "encryption_types", None, True, None)
+      realm = get_property_value(kerberos_env, "realm", None, True, None)
+      kdc_hosts = get_property_value(kerberos_env, 'kdc_hosts', kdc_hosts)
+      admin_server_host = get_property_value(kerberos_env, 'admin_server_host', admin_server_host)
+
+    if krb5_conf_data is not None:
+      realm = get_property_value(krb5_conf_data, 'realm', realm)
+      domains = get_property_value(krb5_conf_data, 'domains', domains)
+
+      admin_principal = get_property_value(krb5_conf_data, 'admin_principal', admin_principal, True, None)
+      admin_password = get_property_value(krb5_conf_data, 'admin_password', admin_password, True, None)
+      admin_keytab = get_property_value(krb5_conf_data, 'admin_keytab', admin_keytab, True, None)
+
+      test_principal = get_property_value(krb5_conf_data, 'test_principal', test_principal, True, None)
+      test_password = get_property_value(krb5_conf_data, 'test_password', test_password, True, None)
+      test_keytab = get_property_value(krb5_conf_data, 'test_keytab', test_keytab, True, None)
+      test_keytab_file = get_property_value(krb5_conf_data, 'test_keytab_file', test_keytab_file, True, None)
+
+      krb5_conf_template = get_property_value(krb5_conf_data, 'content', krb5_conf_template)
+      krb5_conf_dir = get_property_value(krb5_conf_data, 'conf_dir', krb5_conf_dir)
+      krb5_conf_file = get_property_value(krb5_conf_data, 'conf_file', krb5_conf_file)
+      krb5_conf_path = krb5_conf_dir + '/' + krb5_conf_file
+
+      manage_krb5_conf = get_property_value(krb5_conf_data, 'manage_krb5_conf', "true")
+
+    # For backward compatibility, ensure that kdc_host exists. This may be needed if the krb5.conf
+    # template in krb5-conf/content had not been updated during the Ambari upgrade to 2.4.0, which
+    # will happen if the template was altered from its stack-default value.
+    kdc_host_parts = kdc_hosts.split(',')
+    if kdc_host_parts:
+      kdc_host = kdc_host_parts[0]
+    else:
+      kdc_host = kdc_hosts
+
+    # ##############################################################################################
+    # Get kdc.conf template data
+    # ##############################################################################################
+    kdcdefaults_kdc_ports = "88"
+    kdcdefaults_kdc_tcp_ports = "88"
+
+    kdc_conf_template = None
+
+    kdc_conf_data = get_property_value(configurations, 'kdc-conf')
+
+    if kdc_conf_data is not None:
+      kdcdefaults_kdc_ports = get_property_value(kdc_conf_data, 'kdcdefaults_kdc_ports', kdcdefaults_kdc_ports)
+      kdcdefaults_kdc_tcp_ports = get_property_value(kdc_conf_data, 'kdcdefaults_kdc_tcp_ports', kdcdefaults_kdc_tcp_ports)
+
+      kdc_conf_template = get_property_value(kdc_conf_data, 'content', kdc_conf_template)
+      kdc_conf_dir = get_property_value(kdc_conf_data, 'conf_dir', kdc_conf_dir)
+      kdc_conf_file = get_property_value(kdc_conf_data, 'conf_file', kdc_conf_file)
+      kdc_conf_path = kdc_conf_dir + '/' + kdc_conf_file
+
+    # ##############################################################################################
+    # Get kadm5.acl template data
+    # ##############################################################################################
+    kdcdefaults_kdc_ports = '88'
+    kdcdefaults_kdc_tcp_ports = '88'
+
+    kadm5_acl_template = None
+
+    kadm5_acl_data = get_property_value(configurations, 'kadm5-acl')
+
+    if kadm5_acl_data is not None:
+      kadm5_acl_template = get_property_value(kadm5_acl_data, 'content', kadm5_acl_template)
+      kadm5_acl_dir = get_property_value(kadm5_acl_data, 'conf_dir', kadm5_acl_dir)
+      kadm5_acl_file = get_property_value(kadm5_acl_data, 'conf_file', kadm5_acl_file)
+      kadm5_acl_path = kadm5_acl_dir + '/' + kadm5_acl_file
+
+  # ################################################################################################
+  # Get commandParams
+  # ################################################################################################
+  command_params = get_property_value(config, 'commandParams')
+  if command_params is not None:
+    keytab_details = get_unstructured_data(command_params, 'keytab')
+
+    if manage_identities:
+      smoke_test_principal = get_property_value(command_params, 'principal_name', smoke_test_principal)
+      smoke_test_keytab_file = get_property_value(command_params, 'keytab_file', smoke_test_keytab_file)
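
Note: the backward-compatibility block above derives kdc_host from the comma-separated
kdc_hosts value. A short sketch with a hypothetical value shows what it yields, and
that str.split(',') always returns at least one element, so the else branch only acts
as a safety net:

    # Hypothetical kdc_hosts value, mirroring the fallback logic in params.py above.
    kdc_hosts = "kdc1.example.com, kdc2.example.com"
    kdc_host_parts = kdc_hosts.split(',')
    kdc_host = kdc_host_parts[0] if kdc_host_parts else kdc_hosts
    print(kdc_host)        # kdc1.example.com
    print("".split(','))   # [''] -- split always yields at least one element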

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/service_check.py
new file mode 100755
index 0000000..7c09171
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/service_check.py
@@ -0,0 +1,81 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from kerberos_common import *
+from resource_management import *
+
+# hashlib is supplied as of Python 2.5 as the replacement interface for md5
+# and other secure hashes.  In 2.6, md5 is deprecated.  Import hashlib if
+# available, avoiding a deprecation warning under 2.6.  Import md5 otherwise,
+# preserving 2.4 compatibility.
+try:
+  import hashlib
+  _md5 = hashlib.md5
+except ImportError:
+  import md5
+  _md5 = md5.new
+
+class KerberosServiceCheck(KerberosScript):
+  def service_check(self, env):
+    import params
+
+    # If Ambari IS managing Kerberos identities (kerberos-env/manage_identities = true), it is
+    # expected that a (smoke) test principal and its associated keytab file are available for use
+    # **  If not available, this service check will fail
+    # **  If available, this service check will execute
+    #
+    # If Ambari IS NOT managing Kerberos identities (kerberos-env/manage_identities = false), the
+    # smoke test principal and its associated keytab file may not be available
+    # **  If not available, this service check will execute
+    # **  If available, this service check will execute
+
+    if ((params.smoke_test_principal is not None) and
+          (params.smoke_test_keytab_file is not None) and
+          os.path.isfile(params.smoke_test_keytab_file)):
+      print "Performing kinit using %s" % params.smoke_test_principal
+
+      ccache_file_name = _md5("{0}|{1}".format(params.smoke_test_principal,params.smoke_test_keytab_file)).hexdigest()
+      ccache_file_path = "{0}{1}kerberos_service_check_cc_{2}".format(params.tmp_dir, os.sep, ccache_file_name)
+
+      kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+      kinit_command = "{0} -c {1} -kt {2} {3}".format(kinit_path_local, ccache_file_path, params.smoke_test_keytab_file, params.smoke_test_principal)
+
+      try:
+        # kinit
+        Execute(kinit_command,
+                user=params.smoke_user
+        )
+      finally:
+        File(ccache_file_path, # Since kinit might fail to write to the cache file for various reasons, an existence check should be done before cleanup
+             action = "delete",
+        )
+    elif params.manage_identities:
+      err_msg = Logger.filter_text("Failed to execute the kinit test because the smoke user principal or keytab was not found or is not available")
+      raise Fail(err_msg)
+    else:
+      # Ambari is not managing identities so if the smoke user does not exist, indicate why....
+      print "Skipping this service check since Ambari is not managing Kerberos identities and the smoke user " \
+            "credentials are not available. To execute this service check, the smoke user principal name " \
+            "and keytab file location must be set in the cluster_env and the smoke user's keytab file must" \
+            "exist in the configured location."
+
+if __name__ == "__main__":
+  KerberosServiceCheck().execute()
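
Note: the service check above names its temporary credential cache by hashing the
smoke principal and keytab path, so repeated checks for the same identity reuse the
same file name. A short sketch with hypothetical values:

    import hashlib
    import os

    # Hypothetical smoke-user principal, keytab path, and agent tmp directory.
    principal = "ambari-qa@EXAMPLE.COM"
    keytab = "/etc/security/keytabs/smokeuser.headless.keytab"
    tmp_dir = "/var/lib/ambari-agent/tmp"

    ccache_name = hashlib.md5("{0}|{1}".format(principal, keytab).encode("utf-8")).hexdigest()
    print("{0}{1}kerberos_service_check_cc_{2}".format(tmp_dir, os.sep, ccache_name))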

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/status_params.py
new file mode 100755
index 0000000..bbae4a3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/status_params.py
@@ -0,0 +1,32 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+hostname = config['hostname']
+kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smoke_user = config['configurations']['cluster-env']['smokeuser']
+smoke_user_principal = config['configurations']['cluster-env']['smokeuser_principal_name']

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/utils.py
new file mode 100755
index 0000000..199e6d7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/scripts/utils.py
@@ -0,0 +1,105 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+def get_property_value(dictionary, property_name, default_value=None, trim_string=False,
+                       empty_value=""):
+  """
+  Get a property value from a dictionary, applying rules as necessary.
+
+  If dictionary does not contain a value for property_name or the value for property_name is None,
+  default_value is used as the value to return.  Then, if trim_string is True and the value is None
+  or the value is an empty string, empty_value is returned; else the (current) value is returned.
+
+  Note: the property value will most likely be a string or a unicode string; however, in the event
+  it is not (for example, a number), this method will behave properly and return the value as is.
+
+  :param dictionary: a dictionary of values
+  :param property_name: the name of a dictionary item to retrieve
+  :param default_value: the value to use if the item is not in the dictionary or the value of the item is None
+  :param trim_string: a Boolean value indicating whether to strip whitespace from the value (True) or not (False)
+  :param empty_value: the value to use if the (current) value is None or an empty string, if trim_string is True
+  :return: the requested property value with rules applied
+  """
+  # If property_name is not in the dictionary, set value to default_value
+  if property_name in dictionary:
+    value = dictionary[property_name]
+    if value is None:
+      value = default_value
+  else:
+    value = default_value
+
+  if trim_string:
+    # If the value is none, consider it empty...
+    if value is None:
+      value = empty_value
+    elif (type(value) == str) or (type(value) == unicode):
+      value = value.strip()
+
+      if len(value) == 0:
+        value = empty_value
+
+  return value
+
+def get_unstructured_data(dictionary, property_name):
+  prefix = property_name + '/'
+  prefix_len = len(prefix)
+  return dict((k[prefix_len:], v) for k, v in dictionary.iteritems() if k.startswith(prefix))
+
+def split_host_and_port(host):
+  """
+  Splits a string into its host and port components
+
+  :param host: a string matching the following pattern: <host name | ip address>[:port]
+  :return: a Dictionary containing 'host' and 'port' entries for the input value
+  """
+
+  if host is None:
+    host_and_port = None
+  else:
+    host_and_port = {}
+    parts = host.split(":")
+
+    if parts is not None:
+      length = len(parts)
+
+      if length > 0:
+        host_and_port['host'] = parts[0]
+
+        if length > 1:
+          host_and_port['port'] = int(parts[1])
+
+  return host_and_port
+
+def set_port(host, port):
+  """
+  Sets the port for a host specification, potentially replacing an existing port declaration
+
+  :param host: a string matching the following pattern: <host name | ip address>[:port]
+  :param port: a string or integer declaring the (new) port
+  :return: a string declaring the new host/port specification
+  """
+  if port is None:
+    return host
+  else:
+    host_and_port = split_host_and_port(host)
+
+    if (host_and_port is not None) and ('host' in host_and_port):
+      return "%s:%s" % (host_and_port['host'], port)
+    else:
+      return host
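
Note: a few usage examples of the helpers above, assuming utils.py is importable and
using hypothetical inputs:

    from utils import get_property_value, split_host_and_port, set_port

    print(get_property_value({'realm': '  EXAMPLE.COM  '}, 'realm', trim_string=True))  # EXAMPLE.COM
    print(get_property_value({}, 'realm', default_value='EXAMPLE.COM'))                 # EXAMPLE.COM
    print(split_host_and_port('kdc.example.com:88'))  # {'host': 'kdc.example.com', 'port': 88}
    print(set_port('kdc.example.com:749', 88))        # kdc.example.com:88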

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/templates/kadm5_acl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/templates/kadm5_acl.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/templates/kadm5_acl.j2
new file mode 100755
index 0000000..d82ae23
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/templates/kadm5_acl.j2
@@ -0,0 +1,20 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+*/admin@{{realm}}	*
+
+{# Additional realm declarations should be placed below #}
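
Note: for a hypothetical realm of EXAMPLE.COM, the template above renders to a single
kadm5.acl entry that grants full kadmin privileges to any principal whose instance is
"admin":

    */admin@EXAMPLE.COM    *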


[17/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/MAPREDUCE2_metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/MAPREDUCE2_metrics.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/MAPREDUCE2_metrics.json
new file mode 100755
index 0000000..f44e3b2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/MAPREDUCE2_metrics.json
@@ -0,0 +1,2596 @@
+{
+  "HISTORYSERVER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.metrics.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.ugi.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.ugi.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.ugi.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.ugi.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.metrics.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthenticationFailures": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthenticationSuccesses": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthorizationFailures": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthorizationSuccesses": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/CallQueueLength": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.CallQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemNonHeapUsedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemNonHeapCommittedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemHeapUsedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemHeapCommittedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemMaxM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountCopy": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisCopy": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountMarkSweepCompact": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisMarkSweepCompact": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillis": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsNew": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsRunnable": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsBlocked": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsWaiting": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsTimedWaiting": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsTerminated": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogFatal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogError": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogWarn": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogInfo": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryCommitted": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[committed]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryInit": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[init]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryCommitted": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[committed]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryInit": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[init]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/MBeanServerId": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.MBeanServerId",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationName": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationVersion": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationVendor": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationName": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationVersion": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationVendor": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/ElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.ElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/PercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.PercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImagePercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImagePercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsPercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsPercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointPercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointPercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModePercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModePercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/HotSpotDiagnostic/DiagnosticOptions": {
+              "metric": "com.sun.management:type=HotSpotDiagnostic.DiagnosticOptions",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/MemoryPoolNames": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.MemoryPoolNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/Name": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/Valid": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/ObjectName": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.ObjectName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Logging/LoggerNames": {
+              "metric": "java.util.logging:type=Logging.LoggerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginSuccessNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginSuccessAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginFailureNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginFailureAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadAllocatedMemoryEnabled": {
+              "metric": "java.lang:type=Threading.ThreadAllocatedMemoryEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadAllocatedMemorySupported": {
+              "metric": "java.lang:type=Threading.ThreadAllocatedMemorySupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/DaemonThreadCount": {
+              "metric": "java.lang:type=Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/PeakThreadCount": {
+              "metric": "java.lang:type=Threading.PeakThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadCpuTimeSupported": {
+              "metric": "java.lang:type=Threading.CurrentThreadCpuTimeSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ObjectMonitorUsageSupported": {
+              "metric": "java.lang:type=Threading.ObjectMonitorUsageSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/SynchronizerUsageSupported": {
+              "metric": "java.lang:type=Threading.SynchronizerUsageSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadContentionMonitoringSupported": {
+              "metric": "java.lang:type=Threading.ThreadContentionMonitoringSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCpuTimeEnabled": {
+              "metric": "java.lang:type=Threading.ThreadCpuTimeEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadCpuTime": {
+              "metric": "java.lang:type=Threading.CurrentThreadCpuTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadUserTime": {
+              "metric": "java.lang:type=Threading.CurrentThreadUserTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCount": {
+              "metric": "java.lang:type=Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/TotalStartedThreadCount": {
+              "metric": "java.lang:type=Threading.TotalStartedThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCpuTimeSupported": {
+              "metric": "java.lang:type=Threading.ThreadCpuTimeSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadContentionMonitoringEnabled": {
+              "metric": "java.lang:type=Threading.ThreadContentionMonitoringEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/AllThreadIds": {
+              "metric": "java.lang:type=Threading.AllThreadIds",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ClassLoading/LoadedClassCount": {
+              "metric": "java.lang:type=ClassLoading.LoadedClassCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ClassLoading/UnloadedClassCount": {
+              "metric": "java.lang:type=ClassLoading.UnloadedClassCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ClassLoading/TotalLoadedClassCount": {
+              "metric": "java.lang:type=ClassLoading.TotalLoadedClassCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ClassLoading/Verbose": {
+              "metric": "java.lang:type=ClassLoading.Verbose",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/BootClassPath": {
+              "metric": "java.lang:type=Runtime.BootClassPath",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/LibraryPath": {
+              "metric": "java.lang:type=Runtime.LibraryPath",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/VmName": {
+              "metric": "java.lang:type=Runtime.VmName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/VmVendor": {
+              "metric": "java.lang:type=Runtime.VmVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/VmVersion": {
+              "metric": "java.lang:type=Runtime.VmVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/BootClassPathSupported": {
+              "metric": "java.lang:type=Runtime.BootClassPathSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/StartTime": {
+              "metric": "java.lang:type=Runtime.StartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/InputArguments": {
+              "metric": "java.lang:type=Runtime.InputArguments",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/ManagementSpecVersion": {
+              "metric": "java.lang:type=Runtime.ManagementSpecVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/SpecName": {
+              "metric": "java.lang:type=Runtime.SpecName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/SpecVendor": {
+              "metric": "java.lang:type=Runtime.SpecVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/SpecVersion": {
+              "metric": "java.lang:type=Runtime.SpecVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/SystemProperties": {
+              "metric": "java.lang:type=Runtime.SystemProperties",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/Uptime": {
+              "metric": "java.lang:type=Runtime.Uptime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/Name": {
+              "metric": "java.lang:type=Runtime.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Runtime/ClassPath": {
+              "metric": "java.lang:type=Runtime.ClassPath",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/MaxFileDescriptorCount": {
+              "metric": "java.lang:type=OperatingSystem.MaxFileDescriptorCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/OpenFileDescriptorCount": {
+              "metric": "java.lang:type=OperatingSystem.OpenFileDescriptorCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/CommittedVirtualMemorySize": {
+              "metric": "java.lang:type=OperatingSystem.CommittedVirtualMemorySize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/FreePhysicalMemorySize": {
+              "metric": "java.lang:type=OperatingSystem.FreePhysicalMemorySize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/FreeSwapSpaceSize": {
+              "metric": "java.lang:type=OperatingSystem.FreeSwapSpaceSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/ProcessCpuLoad": {
+              "metric": "java.lang:type=OperatingSystem.ProcessCpuLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/ProcessCpuTime": {
+              "metric": "java.lang:type=OperatingSystem.ProcessCpuTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/SystemCpuLoad": {
+              "metric": "java.lang:type=OperatingSystem.SystemCpuLoad",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/TotalPhysicalMemorySize": {
+              "metric": "java.lang:type=OperatingSystem.TotalPhysicalMemorySize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/TotalSwapSpaceSize": {
+              "metric": "java.lang:type=OperatingSystem.TotalSwapSpaceSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/AvailableProcessors": {
+              "metric": "java.lang:type=OperatingSystem.AvailableProcessors",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/Version": {
+              "metric": "java.lang:type=OperatingSystem.Version",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/Arch": {
+              "metric": "java.lang:type=OperatingSystem.Arch",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/SystemLoadAverage": {
+              "metric": "java.lang:type=OperatingSystem.SystemLoadAverage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/OperatingSystem/Name": {
+              "metric": "java.lang:type=OperatingSystem.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/UsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/UsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/UsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/PermGen/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Perm Gen.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/mapred/Count": {
+              "metric": "java.nio:type=BufferPool,name=mapped.Count",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/mapred/MemoryUsed": {
+              "metric": "java.nio:type=BufferPool,name=mapped.MemoryUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/mapred/TotalCapacity": {
+              "metric": "java.nio:type=BufferPool,name=mapped.TotalCapacity",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/mapred/Name": {
+              "metric": "java.nio:type=BufferPool,name=mapped.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/direct/Count": {
+              "metric": "java.nio:type=BufferPool,name=direct.Count",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/direct/MemoryUsed": {
+              "metric": "java.nio:type=BufferPool,name=direct.MemoryUsed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/direct/TotalCapacity": {
+              "metric": "java.nio:type=BufferPool,name=direct.TotalCapacity",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/BufferPool/direct/Name": {
+              "metric": "java.nio:type=BufferPool,name=direct.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/LastGcInfo": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.LastGcInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/CollectionCount": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/CollectionTime": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.CollectionTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/MemoryPoolNames": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.MemoryPoolNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/Name": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/MarkSweepCompact/Valid": {
+              "metric": "java.lang:type=GarbageCollector,name=MarkSweepCompact.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/LastGcInfo": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.LastGcInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/CollectionCount": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/CollectionTime": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.CollectionTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/MemoryPoolNames": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.MemoryPoolNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/Name": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/GarbageCollector/Copy/Valid": {
+              "metric": "java.lang:type=GarbageCollector,name=Copy.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/UsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/UsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/UsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/CodeCache/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Code Cache.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/EdenSpace/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Eden Space.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/UsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/UsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/UsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.UsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/TenuredGen/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Tenured Gen.Valid",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.metrics.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.ugi.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/memory/swap_total": {
+              "metric": "swap_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/memory/mem_buffers": {
+              "metric": "mem_buffers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.ugi.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.ugi.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.ugi.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.metrics.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTimeAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTimeAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthenticationFailures": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthenticationSuccesses": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthorizationFailures": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcAuthorizationSuccesses": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.RpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/CallQueueLength": {
+              "metric": "Hadoop:service=JobHistoryServer,name=RpcActivity.CallQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemNonHeapUsedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemNonHeapCommittedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemHeapUsedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemHeapCommittedM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/MemMaxM": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountCopy": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisCopy": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCountMarkSweepCompact": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillisMarkSweepCompact": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/GcTimeMillis": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsNew": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsRunnable": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsBlocked": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsWaiting": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsTimedWaiting": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/ThreadsTerminated": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogFatal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogError": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogWarn": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/LogInfo": {
+              "metric": "Hadoop:service=JobHistoryServer,name=JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryCommitted": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[committed]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/HeapMemoryInit": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[init]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryCommitted": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[committed]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Memory/NonHeapMemoryInit": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[init]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/MBeanServerId": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.MBeanServerId",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationName": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationVersion": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/SpecificationVendor": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.SpecificationVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationName": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationVersion": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVersion",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MBeanServerDelegate/ImplementationVendor": {
+              "metric": "JMImplementation:type=MBeanServerDelegate.ImplementationVendor",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/ElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.ElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/PercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.PercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImageTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImageTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingFsImagePercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingFsImagePercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/LoadingEditsPercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.LoadingEditsPercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SavingCheckpointPercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SavingCheckpointPercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeCount": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeElapsedTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeElapsedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModeTotal": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModeTotal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/StartupProgress/SafeModePercentComplete": {
+              "metric": "Hadoop:service=JobHistoryServer,name=StartupProgress.SafeModePercentComplete",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/HotSpotDiagnostic/DiagnosticOptions": {
+              "metric": "com.sun.management:type=HotSpotDiagnostic.DiagnosticOptions",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/MemoryPoolNames": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.MemoryPoolNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/Name": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/Valid": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryManager/ObjectName": {
+              "metric": "java.lang:type=MemoryManager,name=CodeCacheManager.ObjectName",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Logging/LoggerNames": {
+              "metric": "java.util.logging:type=Logging.LoggerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginSuccessNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginSuccessAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginFailureNumOps": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/UgiMetrics/LoginFailureAvgTime": {
+              "metric": "Hadoop:service=JobHistoryServer,name=UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThreshold": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThreshold",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdCount": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/MemoryManagerNames": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.MemoryManagerNames",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/PeakUsage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.PeakUsage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Usage": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Usage",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdExceeded": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdExceeded",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/CollectionUsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.CollectionUsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/UsageThresholdSupported": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.UsageThresholdSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Name": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Name",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Type": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Type",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/MemoryPool/SurvivorSpace/Valid": {
+              "metric": "java.lang:type=MemoryPool,name=Survivor Space.Valid",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadAllocatedMemoryEnabled": {
+              "metric": "java.lang:type=Threading.ThreadAllocatedMemoryEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadAllocatedMemorySupported": {
+              "metric": "java.lang:type=Threading.ThreadAllocatedMemorySupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/DaemonThreadCount": {
+              "metric": "java.lang:type=Threading.DaemonThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/PeakThreadCount": {
+              "metric": "java.lang:type=Threading.PeakThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadCpuTimeSupported": {
+              "metric": "java.lang:type=Threading.CurrentThreadCpuTimeSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ObjectMonitorUsageSupported": {
+              "metric": "java.lang:type=Threading.ObjectMonitorUsageSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/SynchronizerUsageSupported": {
+              "metric": "java.lang:type=Threading.SynchronizerUsageSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadContentionMonitoringSupported": {
+              "metric": "java.lang:type=Threading.ThreadContentionMonitoringSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCpuTimeEnabled": {
+              "metric": "java.lang:type=Threading.ThreadCpuTimeEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadCpuTime": {
+              "metric": "java.lang:type=Threading.CurrentThreadCpuTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/CurrentThreadUserTime": {
+              "metric": "java.lang:type=Threading.CurrentThreadUserTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCount": {
+              "metric": "java.lang:type=Threading.ThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/TotalStartedThreadCount": {
+              "metric": "java.lang:type=Threading.TotalStartedThreadCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadCpuTimeSupported": {
+              "metric": "java.lang:type=Threading.ThreadCpuTimeSupported",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/ThreadContentionMonitoringEnabled": {
+              "metric": "java.lang:type=Threading.ThreadContentionMonitoringEnabled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/Threading/AllThreadIds": {
+              "metric": "java.lang:typ

<TRUNCATED>
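
Editor's note on the fragment above: the metrics definition follows the usual Ambari layout, where each entry maps an API metric path to a source metric name, "pointInTime" marks values readable on demand, "temporal" marks values backed by a time series, and the "ganglia" and "jmx" source types distinguish sink-collected metrics from JMX bean attributes. As a rough, hedged illustration only (the file path is a placeholder, not part of this commit), such a definition file can be inspected with a few lines of Python:

    import json

    # Hypothetical local copy of a stack metrics definition like the one above.
    with open("metrics.json") as fh:
        definitions = json.load(fh)

    # Walk every component and source block, listing metrics backed by a time series.
    for component, sources in definitions.items():
        for scope in ("Component", "HostComponent"):
            for source in sources.get(scope, []):
                for path, spec in source["metrics"]["default"].items():
                    if spec.get("temporal"):
                        print("%s %s -> %s" % (component, path, spec["metric"]))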

[03/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/custom_extensions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/custom_extensions.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/custom_extensions.py
new file mode 100755
index 0000000..5563d54
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/custom_extensions.py
@@ -0,0 +1,168 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.core.resources import Directory
+from resource_management.core.resources import Execute
+from resource_management.libraries.functions import default
+from resource_management.libraries.script.script import Script
+
+
+def setup_extensions():
+  import params
+
+  # Hadoop Custom extensions
+  hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
+  hadoop_custom_extensions_services = default("/configurations/core-site/hadoop.custom-extensions.services", "")
+  hadoop_custom_extensions_owner = default("/configurations/core-site/hadoop.custom-extensions.owner", params.hdfs_user)
+  hadoop_custom_extensions_services = [ service.strip().upper() for service in hadoop_custom_extensions_services.split(",") ]
+  hadoop_custom_extensions_services.append("YARN")
+  hadoop_custom_extensions_hdfs_dir = "/iop/ext/{0}/hadoop".format(params.stack_version_formatted)
+  hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(Script.get_stack_root())
+
+  if params.current_service in hadoop_custom_extensions_services:
+    clean_extensions(hadoop_custom_extensions_local_dir)
+    if hadoop_custom_extensions_enabled:
+      download_extensions(hadoop_custom_extensions_owner, params.user_group,
+                          hadoop_custom_extensions_hdfs_dir,
+                          hadoop_custom_extensions_local_dir)
+
+  setup_extensions_hive()
+
+  hbase_custom_extensions_services = []
+  hbase_custom_extensions_services.append("HBASE")
+  if params.current_service in hbase_custom_extensions_services:
+    setup_hbase_extensions()
+
+def setup_extensions_hive():
+  import params
+
+  hive_custom_extensions_enabled = default("/configurations/hive-site/hive.custom-extensions.enabled", False)
+  hive_custom_extensions_owner = default("/configurations/hive-site/hive.custom-extensions.owner", params.hdfs_user)
+  hive_custom_extensions_hdfs_dir = "/iop/ext/{0}/hive".format(params.stack_version_formatted)
+  hive_custom_extensions_local_dir = "{0}/current/ext/hive".format(Script.get_stack_root())
+
+  impacted_components = ['HIVE_SERVER', 'HIVE_CLIENT']
+  role = params.config.get('role','')
+
+  # Run copying for HIVE_SERVER and HIVE_CLIENT
+  if params.current_service == 'HIVE' and role in impacted_components:
+    clean_extensions(hive_custom_extensions_local_dir)
+    if hive_custom_extensions_enabled:
+      download_extensions(hive_custom_extensions_owner, params.user_group,
+                          hive_custom_extensions_hdfs_dir,
+                          hive_custom_extensions_local_dir)
+
+def download_extensions(owner_user, owner_group, hdfs_source_dir, local_target_dir):
+  """
+  :param owner_user: user owner of the HDFS directory
+  :param owner_group: group owner of the HDFS directory
+  :param hdfs_source_dir: the HDFS directory from which the files are pulled
+  :param local_target_dir: the local directory into which the files are downloaded
+  :return: True if successful, otherwise False.
+  """
+  import params
+
+  if not os.path.isdir(local_target_dir):
+    import tempfile
+
+    #Create a secure random temp directory
+    tmp_dir=tempfile.mkdtemp()
+    cmd = ('chown', '-R', params.hdfs_user, tmp_dir)
+    Execute(cmd, sudo=True)
+    cmd = ('chmod', '755', tmp_dir)
+    Execute(cmd, sudo=True)
+
+    Directory(os.path.dirname(local_target_dir),
+              owner="root",
+              mode=0755,
+              group="root",
+              create_parents=True)
+
+    params.HdfsResource(hdfs_source_dir,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=owner_user,
+                        group=owner_group,
+                        mode=0755)
+
+    # copy from hdfs to tmp_dir
+    params.HdfsResource(tmp_dir,
+                        type="directory",
+                        action="download_on_execute",
+                        source=hdfs_source_dir,
+                        user=params.hdfs_user,
+                        mode=0644,
+                        replace_existing_files=True)
+
+    # The Execute resource does not quote arguments correctly, so pass the command as a tuple.
+    cmd = ('mv', tmp_dir, local_target_dir)
+    only_if_cmd = "ls -d {tmp_dir}/*".format(tmp_dir=tmp_dir)
+    Execute(cmd, only_if=only_if_cmd, sudo=True)
+
+    only_if_local = 'ls -d "{local_target_dir}"'.format(local_target_dir=local_target_dir)
+    Execute(("chown", "-R", "root:root", local_target_dir),
+            sudo=True,
+            only_if=only_if_local)
+
+    params.HdfsResource(None,action="execute")
+  return True
+
+def clean_extensions(local_dir):
+  """
+  :param local_dir: The local directory where the extensions are stored.
+  :return: True if successful, otherwise False.
+  """
+  if os.path.isdir(local_dir):
+    Directory(local_dir,
+              action="delete",
+              owner="root")
+  return True
+
+def setup_hbase_extensions():
+  import params
+
+  # HBase Custom extensions
+  hbase_custom_extensions_enabled = default("/configurations/hbase-site/hbase.custom-extensions.enabled", False)
+  hbase_custom_extensions_owner = default("/configurations/hbase-site/hbase.custom-extensions.owner", params.hdfs_user)
+  hbase_custom_extensions_hdfs_dir = "/iop/ext/{0}/hbase".format(params.stack_version_formatted)
+
+  if hbase_custom_extensions_enabled:
+    download_hbase_extensions(hbase_custom_extensions_owner, params.user_group,
+                        hbase_custom_extensions_hdfs_dir)
+
+def download_hbase_extensions(owner_user, owner_group, hdfs_source_dir):
+  """
+  :param owner_user: user owner of the HDFS directory
+  :param owner_group: group owner of the HDFS directory
+  :param hdfs_source_dir: the HDFS directory where the extension files reside
+  :return: True if successful, otherwise False.
+  """
+  import params
+
+  params.HdfsResource(hdfs_source_dir,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=owner_user,
+                        group=owner_group,
+                        mode=0755)
+
+  params.HdfsResource(None,action="execute")
+  return True
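
Editor's note on custom_extensions.py: the mechanism is driven entirely by stack configuration properties (hadoop.custom-extensions.enabled / .services / .owner in core-site, and the analogous hive-site and hbase-site keys), and the HDFS and local paths are derived from the stack version and stack root. Below is a minimal, self-contained sketch of that path derivation; the sample version and stack root are illustrative assumptions, not values defined by this patch:

    # Sketch only: mirrors the path-building logic in setup_extensions()
    # with hard-coded sample inputs (illustrative assumptions).
    stack_version_formatted = "4.2.5.0"   # normally params.stack_version_formatted
    stack_root = "/usr/iop"               # normally Script.get_stack_root()

    hadoop_custom_extensions_hdfs_dir = "/iop/ext/{0}/hadoop".format(stack_version_formatted)
    hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(stack_root)

    print(hadoop_custom_extensions_hdfs_dir)    # /iop/ext/4.2.5.0/hadoop
    print(hadoop_custom_extensions_local_dir)   # /usr/iop/current/ext/hadoop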

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/hook.py
new file mode 100755
index 0000000..8d64c70
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/hook.py
@@ -0,0 +1,41 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from custom_extensions import setup_extensions
+from rack_awareness import create_topology_script_and_mapping
+from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
+
+class BeforeStartHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+
+    setup_hadoop()
+    setup_configs()
+    create_javahome_symlink()
+    create_topology_script_and_mapping()
+    setup_extensions()
+
+if __name__ == "__main__":
+  BeforeStartHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/params.py
new file mode 100755
index 0000000..be9db58
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/params.py
@@ -0,0 +1,318 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+
+config = Script.get_config()
+
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+dfs_type = default("/commandParams/dfs_type", "")
+hadoop_conf_dir = "/etc/hadoop/conf"
+
+component_list = default("/localComponents", [])
+
+hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
+
+# hadoop default params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_home = '/usr'
+create_lib_snappy_symlinks = True
+
+# IOP 4.0+ params
+if Script.is_stack_greater_or_equal("4.0"):
+  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
+  hadoop_home = stack_select.get_hadoop_dir("home")
+  create_lib_snappy_symlinks = False
+  
+current_service = config['serviceName']
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#users and groups
+has_hadoop_env = 'hadoop-env' in config['configurations']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+
+has_namenode = not len(namenode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+has_hcat_server_host = not len(hcat_server_hosts) == 0
+has_hive_server_host = not len(hive_server_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_metric_collector = not len(ams_collector_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
+    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
+  else:
+    metric_collector_host = ams_collector_hosts[0]
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+  pass
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+
+#hadoop params
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hbase_tmp_dir = "/tmp/hbase-hbase"
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+ambari_server_resources = config['hostLevelParams']['jdk_location']
+oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
+mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
+
+if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
+  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
+else:
+  rca_enabled = False
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+
+#log4j.properties
+if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
+  log4j_props = config['configurations']['hdfs-log4j']['content']
+  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
+    log4j_props += config['configurations']['yarn-log4j']['content']
+else:
+  log4j_props = None
+
+refresh_topology = False
+command_params = config["commandParams"] if "commandParams" in config else None
+if command_params is not None:
+  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+  
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+#host info
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+
+#topology files
+net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
+net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
+net_topology_mapping_data_file_name = 'topology_mappings.data'
+net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
+
+#Added logic to create /tmp and /user directory for HCFS stack.  
+has_core_site = 'core-site' in config['configurations']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path()
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+
+##### Namenode RPC ports - metrics config section start #####
+
+# Figure out the rpc ports for current namenode
+nn_rpc_client_port = None
+nn_rpc_dn_port = None
+nn_rpc_healthcheck_port = None
+
+namenode_id = None
+namenode_rpc = None
+
+dfs_ha_enabled = False
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+dfs_ha_namemodes_ids_list = []
+other_namenode_id = None
+
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namemodes_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+      namenode_rpc = nn_host
+    pass
+  pass
+else:
+  namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', None)
+
+if namenode_rpc:
+  nn_rpc_client_port = namenode_rpc.split(':')[1].strip()
+
+if dfs_ha_enabled:
+  dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+  dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+else:
+  dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
+  dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)
+
+if dfs_service_rpc_address:
+  nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
+
+if dfs_lifeline_rpc_address:
+  nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip()
+
+is_nn_client_port_configured = nn_rpc_client_port is not None
+is_nn_dn_port_configured = nn_rpc_dn_port is not None
+is_nn_healthcheck_port_configured = nn_rpc_healthcheck_port is not None
+
+##### end #####
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
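
Editor's note on the HdfsResource partial defined at the end of params.py: it lets the hook scripts above call params.HdfsResource(...) with only the arguments that vary per call (path, type, action), while keytabs, principals and site configs are bound once here. The snippet below is a generic illustration of the functools.partial pattern with a stand-in function and made-up values, not Ambari's actual resource class:

    import functools

    def hdfs_resource(path, action, user=None, hdfs_site=None, default_fs=None):
        # Stand-in for the real HdfsResource; just shows which arguments arrive.
        print("%s %s as %s on %s" % (path, action, user, default_fs))

    # Bake in the cluster-wide arguments once, as params.py does.
    HdfsResource = functools.partial(
        hdfs_resource,
        user="hdfs",
        hdfs_site={"dfs.webhdfs.enabled": "true"},
        default_fs="hdfs://mycluster",
    )

    # Call sites then pass only what varies, mirroring custom_extensions.py.
    HdfsResource("/iop/ext/4.2.5.0/hadoop", action="create_on_execute")
    HdfsResource(None, action="execute")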

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/rack_awareness.py
new file mode 100755
index 0000000..548f051
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/rack_awareness.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.core.resources import File
+from resource_management.core.source import StaticFile, Template
+from resource_management.libraries.functions import format
+
+
+def create_topology_mapping():
+  import params
+
+  File(params.net_topology_mapping_data_file_path,
+       content=Template("topology_mappings.data.j2"),
+       owner=params.hdfs_user,
+       group=params.user_group,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script():
+  import params
+
+  File(params.net_topology_script_file_path,
+       content=StaticFile('topology_script.py'),
+       mode=0755,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script_and_mapping():
+  import params
+  if params.has_hadoop_env:
+    create_topology_mapping()
+    create_topology_script()
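
Editor's note on rack_awareness.py: it only materializes two files under the Hadoop conf dir, a topology_mappings.data file rendered from a template and a topology_script.py copied verbatim. Hadoop's rack-awareness contract is that the script configured via net.topology.script.file.name receives host names or IPs as arguments and prints one rack path per argument. A minimal sketch of such a script follows; the mapping entries and default rack are illustrative, not taken from this patch:

    #!/usr/bin/env python
    # Minimal sketch of a Hadoop topology script: host/IP args in, one rack per line out.
    import sys

    # Illustrative mapping; the real data comes from topology_mappings.data.
    RACKS = {
        "192.168.1.10": "/rack01",
        "node1.example.com": "/rack01",
    }
    DEFAULT_RACK = "/default-rack"

    for host in sys.argv[1:]:
        print(RACKS.get(host, DEFAULT_RACK))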

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/shared_initialization.py
new file mode 100755
index 0000000..3366f58
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,177 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+
+from resource_management import *
+
+def setup_hadoop():
+  """
+  Set up Hadoop files and directories.
+  """
+  import params
+
+  Execute(("setenforce","0"),
+          only_if="test -f /selinux/enforce",
+          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
+          sudo=True,
+  )
+
+  #directories
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    Directory(params.hdfs_log_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group=params.user_group,
+              mode=0775,
+              cd_access='a',
+    )
+    if params.has_namenode:
+      Directory(params.hadoop_pid_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group='root',
+              cd_access='a',
+      )
+    Directory(params.hadoop_tmp_dir,
+              create_parents = True,
+              owner=params.hdfs_user,
+              cd_access='a',
+              )
+  #files
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+      
+    # if WebHDFS is not enabled we need this jar to create hadoop folders.
+    if params.host_sys_prepped:
+      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
+    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+      # for the source code of this jar, see contrib/fast-hdfs-resource
+      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
+           mode=0644,
+           content=StaticFile("fast-hdfs-resource.jar")
+      )
+      
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
+           mode=0644,
+           group=params.user_group,
+           owner=tc_owner,
+           content=Template('commons-logging.properties.j2')
+      )
+
+      health_check_template_name = "health_check"
+      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
+           owner=tc_owner,
+           content=Template(health_check_template_name + ".j2")
+      )
+
+      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+      if (params.log4j_props != None):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+             content=params.log4j_props
+        )
+      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+        )
+
+      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+           owner=params.hdfs_user,
+           group=params.user_group,
+           content=Template("hadoop-metrics2.properties.j2")
+      )
+
+    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
+       create_dirs()
+
+
+def setup_configs():
+  """
+  Creates configs for the HDFS and MapReduce services.
+  """
+  import params
+
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    if os.path.exists(params.hadoop_conf_dir):
+      File(params.task_log4j_properties_location,
+           content=StaticFile("task-log4j.properties"),
+           mode=0755
+      )
+
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+      File(os.path.join(params.hadoop_conf_dir, 'masters'),
+                owner=params.hdfs_user,
+                group=params.user_group
+      )
+
+  generate_include_file()
+
+
+def generate_include_file():
+  import params
+
+  if params.has_namenode and params.dfs_hosts and params.has_slaves:
+    include_hosts_list = params.slave_hosts
+    File(params.dfs_hosts,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+def create_javahome_symlink():
+  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
+    Directory("/usr/jdk64/",
+         create_parents = True,
+    )
+    Link("/usr/jdk/jdk1.6.0_31",
+         to="/usr/jdk64/jdk1.6.0_31",
+    )
+
+def create_dirs():
+   import params
+   params.HdfsResource(params.hdfs_tmp_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.hdfs_user,
+                       mode=0777
+   )
+   params.HdfsResource(params.smoke_hdfs_user_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.smoke_user,
+                       mode=params.smoke_hdfs_user_mode
+   )
+   params.HdfsResource(None,
+                      action="execute"
+   )
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/commons-logging.properties.j2
new file mode 100755
index 0000000..2197ba5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,43 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100755
index 0000000..1adba80
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
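This template simply emits one decommissioned host per line from hdfs_exclude_file; a standalone rendering sketch with plain Jinja2 (host names invented for illustration):

    # Standalone illustration of the output this template produces.
    from jinja2 import Template

    tmpl = Template("{% for host in hdfs_exclude_file %}{{ host }}\n{% endfor %}")
    text = tmpl.render(hdfs_exclude_file=["dn1.example.com", "dn2.example.com"])
    # text == "dn1.example.com\ndn2.example.com\n"
    print(text)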

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100755
index 0000000..6f32000
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,108 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
+supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}
+
+{% if has_metric_collector %}
+
+*.period={{metrics_collection_period}}
+*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+*.sink.timeline.period={{metrics_collection_period}}
+*.sink.timeline.sendInterval={{metrics_report_interval}}000
+*.sink.timeline.slave.host.name = {{hostname}}
+
+# HTTPS properties
+*.sink.timeline.truststore.path = {{metric_truststore_path}}
+*.sink.timeline.truststore.type = {{metric_truststore_type}}
+*.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+datanode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+namenode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+resourcemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+nodemanager.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+jobhistoryserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+journalnode.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+maptask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+reducetask.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+applicationhistoryserver.sink.timeline.collector={{metric_collector_protocol}}://{{metric_collector_host}}:{{metric_collector_port}}
+
+resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
+
+{% if is_nn_client_port_configured %}
+# Namenode rpc ports customization
+namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
+{% endif %}
+{% if is_nn_dn_port_configured %}
+namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
+{% endif %}
+{% if is_nn_healthcheck_port_configured %}
+namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
+{% endif %}
+
+# Switch off container metrics
+*.source.filter.class=org.apache.hadoop.metrics2.filter.GlobFilter
+nodemanager.*.source.filter.exclude=*ContainerResource*
+
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/health_check.j2
new file mode 100755
index 0000000..0a03d17
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/health_check.j2
@@ -0,0 +1,81 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+# Run all checks
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0
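The rendered script prints "OK: ..." or "ERROR ..." depending on the disk check, but its final statement always exits 0, so callers inspect the output text rather than the exit code. A hedged sketch of invoking it (the conf-dir path is an assumption):

    # Illustration: run the rendered health_check and inspect its output.
    import subprocess

    proc = subprocess.Popen(["bash", "/etc/hadoop/conf/health_check"],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    if out.strip().startswith(b"ERROR"):
      raise RuntimeError("host health check failed: %s" % out)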

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/include_hosts_list.j2
new file mode 100755
index 0000000..4a9e713
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/topology_mappings.data.j2
new file mode 100755
index 0000000..15034d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/hooks/before-START/templates/topology_mappings.data.j2
@@ -0,0 +1,24 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[network_topology]
+{% for host in all_hosts %}
+{% if host in slave_hosts %}
+{{host}}={{all_racks[loop.index-1]}}
+{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
+{% endif %}
+{% endfor %}
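topology_script.py (shipped as a StaticFile by rack_awareness.py above, not shown in this hunk) reads this [network_topology] section to map hosts and IPs to racks; a rough sketch of that lookup, assuming the INI-style format rendered here:

    # Rough sketch of resolving a rack from the rendered mapping file.
    try:
      from configparser import ConfigParser   # Python 3
    except ImportError:
      from ConfigParser import ConfigParser   # Python 2

    def resolve_rack(host_or_ip, mapping_file="/etc/hadoop/conf/topology_mappings.data"):
      parser = ConfigParser()
      parser.read(mapping_file)
      if parser.has_option("network_topology", host_or_ip):
        return parser.get("network_topology", host_or_ip)
      return "/default-rack"   # fallback rack is an assumption for this sketch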

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/kerberos.json
new file mode 100755
index 0000000..92dcc8f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/kerberos.json
@@ -0,0 +1,47 @@
+{
+  "properties": {
+    "realm": "${kerberos-env/realm}",
+    "keytab_dir": "/etc/security/keytabs"
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type" : "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    },
+    {
+      "name": "smokeuser",
+      "principal": {
+        "value": "${cluster-env/smokeuser}-${cluster_name|toLower()}@${realm}",
+        "type" : "user",
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username" : "${cluster-env/smokeuser}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "owner": {
+          "name": "${cluster-env/smokeuser}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "cluster-env/smokeuser_keytab"
+      }
+    }
+  ]
+}
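Placeholders such as ${kerberos-env/realm} refer to "config-type/property" values and are resolved by the Ambari server when identities are materialized (including transforms like |toLower(), which are not reproduced here). A toy sketch of plain placeholder substitution, for illustration only:

    # Toy illustration of the ${config-type/property} placeholder style used above.
    import re

    configs = {"kerberos-env/realm": "EXAMPLE.COM",
               "cluster-env/smokeuser": "ambari-qa"}

    def resolve(value):
      return re.sub(r"\$\{([^}|]+)\}",
                    lambda m: configs.get(m.group(1), m.group(0)), value)

    print(resolve("${cluster-env/smokeuser}@${kerberos-env/realm}"))
    # ambari-qa@EXAMPLE.COM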

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/metainfo.xml
new file mode 100755
index 0000000..3cf364e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/metainfo.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+      <active>true</active>
+    </versions>
+    <extends>4.2</extends>
+    <minJdk>1.7</minJdk>
+    <maxJdk>1.8</maxJdk>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml
new file mode 100755
index 0000000..99b74c2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <mainrepoid>IOP-4.2.5</mainrepoid>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/6/x86_64/4.2.x/Updates/4.2.5.0</baseurl>
+      <repoid>IOP-4.2.5</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/6/x86_64/1.3</baseurl>
+      <repoid>IOP-UTILS-1.3</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.amd64_RH6
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.amd64_RH6 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.amd64_RH6
new file mode 100755
index 0000000..aab2745
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.amd64_RH6
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <mainrepoid>IOP-4.2.5</mainrepoid>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/6/x86_64/4.2.x/GA/4.2.5.0/</baseurl>
+      <repoid>IOP-4.2.5</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/6/x86_64/1.3/</baseurl>
+      <repoid>IOP-UTILS-1.3</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.amd64_RH7
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.amd64_RH7 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.amd64_RH7
new file mode 100755
index 0000000..dc30296
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.amd64_RH7
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <mainrepoid>IOP-4.2.5</mainrepoid>
+  <os family="redhat7">
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/7/x86_64/4.2.x/GA/4.2.5.0/</baseurl>
+      <repoid>IOP-4.2.5</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/7/x86_64/1.3/</baseurl>
+      <repoid>IOP-UTILS-1.3</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.amd64_SLES
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.amd64_SLES b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.amd64_SLES
new file mode 100755
index 0000000..94097e4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.amd64_SLES
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <mainrepoid>IOP-4.2.5</mainrepoid>
+  <os family="suse11">
+    <repo>
+      <baseurl>https://ibm-open-platform.ibm.com/repos/IOP/sles/11/x86_64/4.2.x/GA/4.2.5.0/</baseurl>
+      <repoid>IOP-4.2.5</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>https://ibm-open-platform.ibm.com/repos/IOP-UTILS/sles/11/x86_64/1.3/</baseurl>
+      <repoid>IOP-UTILS-1.3</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.ppc64le_RH7
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.ppc64le_RH7 b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.ppc64le_RH7
new file mode 100755
index 0000000..bdcac70
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/repos/repoinfo.xml.ppc64le_RH7
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <mainrepoid>IOP-4.2.5</mainrepoid>
+  <os family="redhat7">
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/7/ppc64le/4.2.x/GA/4.2.5.0/</baseurl>
+      <repoid>IOP-4.2.5</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/7/ppc64le/1.3/</baseurl>
+      <repoid>IOP-UTILS-1.3</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
new file mode 100755
index 0000000..dc4811b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
@@ -0,0 +1,31 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "HIVE_SERVER_INTERACTIVE-START": ["RESOURCEMANAGER-START", "NODEMANAGER-START", "MYSQL_SERVER-START"],
+    "RESOURCEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "SPARK2_THRIFTSERVER-STOP", "KERNEL_GATEWAY-STOP" ],
+    "NODEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "KERNEL_GATEWAY-STOP" ],
+    "NAMENODE-STOP": ["HIVE_SERVER_INTERACTIVE-STOP"],
+    "HIVE_SERVER_INTERACTIVE-RESTART": ["NODEMANAGER-RESTART", "MYSQL_SERVER-RESTART"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START", "HIVE_SERVER_INTERACTIVE-START"],
+    "RANGER_ADMIN-START": ["ZOOKEEPER_SERVER-START", "INFRA_SOLR-START"],
+    "SPARK2_SERVICE_CHECK-SERVICE_CHECK" : ["SPARK2_JOBHISTORYSERVER-START", "APP_TIMELINE_SERVER-START"],
+    "HBASE_REST_SERVER-START": ["HBASE_MASTER-START"],
+    "TITAN_SERVER-START" : ["HBASE_SERVICE_CHECK-SERVICE_CHECK", "SOLR-START"],
+    "TITAN_SERVICE_CHECK-SERVICE_CHECK": ["TITAN_SERVER-START"],
+    "KERNEL_GATEWAY-INSTALL": ["SPARK2_CLIENT-INSTALL"],
+    "PYTHON_CLIENT-INSTALL": ["KERNEL_GATEWAY-INSTALL"],
+    "KERNEL_GATEWAY-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "SPARK2_JOBHISTORYSERVER-START"],
+    "JNBG_SERVICE_CHECK-SERVICE_CHECK": ["KERNEL_GATEWAY-START"],
+    "R4ML-INSTALL": ["SPARK2_CLIENT-INSTALL", "SYSTEMML-INSTALL"],
+    "R4ML_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START", "SPARK2_JOBHISTORYSERVER-START"]
+  },
+  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+  "optional_no_glusterfs": {
+    "SPARK2_JOBHISTORYSERVER-START" : ["NAMENODE-START", "DATANODE-START"],
+    "SPARK2_THRIFTSERVER-START" : ["NAMENODE-START", "DATANODE-START", "HIVE_SERVER-START"],
+    "METRICS_GRAFANA-START": ["METRICS_COLLECTOR-START"],
+    "METRICS_COLLECTOR-STOP": ["METRICS_GRAFANA-STOP"]
+  }
+}
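Each key reads "blocked command: [blocker commands]", i.e. the key may only run once all of its blockers have completed. A small sketch of deriving a start order from such a map (illustrative only; the real ordering is computed by the Ambari server):

    # Illustrative topological ordering over a role_command_order-style map.
    deps = {
      "HBASE_REST_SERVER-START": ["HBASE_MASTER-START"],
      "TITAN_SERVER-START": ["HBASE_SERVICE_CHECK-SERVICE_CHECK", "SOLR-START"],
      "TITAN_SERVICE_CHECK-SERVICE_CHECK": ["TITAN_SERVER-START"],
    }

    def ordered(commands, deps):
      done, order = set(), []
      def visit(cmd):
        if cmd in done:
          return
        done.add(cmd)
        for blocker in deps.get(cmd, []):
          if blocker in commands:
            visit(blocker)
        order.append(cmd)
      for cmd in commands:
        visit(cmd)
      return order

    print(ordered(["TITAN_SERVICE_CHECK-SERVICE_CHECK",
                   "TITAN_SERVER-START", "SOLR-START"], deps))
    # ['SOLR-START', 'TITAN_SERVER-START', 'TITAN_SERVICE_CHECK-SERVICE_CHECK']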

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/AMBARI_INFRA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/AMBARI_INFRA/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/AMBARI_INFRA/metainfo.xml
new file mode 100755
index 0000000..635d525
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/AMBARI_INFRA/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>AMBARI_INFRA</name>
+      <extends>common-services/AMBARI_INFRA/0.1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/AMBARI_INFRA/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/AMBARI_INFRA/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/AMBARI_INFRA/role_command_order.json
new file mode 100755
index 0000000..7246e9e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/AMBARI_INFRA/role_command_order.json
@@ -0,0 +1,7 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for ambari infra",
+    "INFRA_SOLR-START" : ["ZOOKEEPER_SERVER-START"],
+    "AMBARI_INFRA_SERVICE_CHECK-SERVICE_CHECK": ["INFRA_SOLR-START"]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/AMBARI_METRICS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/AMBARI_METRICS/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/AMBARI_METRICS/metainfo.xml
new file mode 100755
index 0000000..c8aa26e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/AMBARI_METRICS/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>AMBARI_METRICS</name>
+      <extends>common-services/AMBARI_METRICS/0.1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/FLUME/metainfo.xml
new file mode 100755
index 0000000..bed2822
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/FLUME/metainfo.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FLUME</name>
+      <version>1.7.0</version>
+      <extends>common-services/FLUME/1.7.0</extends>
+      
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>flume_4_2_5_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/hbase-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/hbase-env.xml
new file mode 100755
index 0000000..4a59bd8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/HBASE/configuration/hbase-env.xml
@@ -0,0 +1,198 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>hbase_log_dir</name>
+    <value>/var/log/hbase</value>
+    <display-name>HBase Log Dir Prefix</display-name>
+    <description>Log Directories for HBase.</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <value>/var/run/hbase</value>
+    <display-name>HBase PID Dir</display-name>
+    <description>Pid Directory for HBase.</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_max</name>
+    <value>512</value>
+    <description>
+Sets the upper bound on HBase RegionServers' young generation size.
+This value is used in case the young generation size (-Xmn) calculated based on the max heapsize (hbase_regionserver_heapsize)
+and the -Xmn ratio (hbase_regionserver_xmn_ratio) exceeds this value.
+    </description>
+    <display-name>RegionServers maximum value for -Xmn</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_ratio</name>
+    <value>0.2</value>
+    <display-name>RegionServers -Xmn in -Xmx ratio</display-name>
+    <description>Percentage of max heap size (-Xmx) which is used for the young generation heap (-Xmn).</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase_user</name>
+    <display-name>HBase User</display-name>
+    <value>hbase</value>
+    <property-type>USER</property-type>
+    <description>HBase User Name.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hbase_max_direct_memory_size</name>
+    <value/>
+    <display-name>HBase off-heap MaxDirectMemorySize</display-name>
+    <description>If not empty, adds '-XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m' to HBASE_REGIONSERVER_OPTS.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>phoenix_sql_enabled</name>
+    <value>false</value>
+    <description>Enable Phoenix SQL</description>
+    <display-name>Enable Phoenix</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>hbase-env template</display-name>
+    <description>This is the Jinja template for the hbase-env.sh file</description>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# If you want to configure BucketCache, specify '-XX:MaxDirectMemorySize=' with the proper direct memory size
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if java_version &lt; 8 %}
+JDK_DEPENDED_OPTS="-XX:PermSize=128m -XX:MaxPermSize=128m"
+{% endif %}
+      
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}} -Djava.io.tmpdir={{java_io_tmpdir}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}} $JDK_DEPENDED_OPTS"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}} $JDK_DEPENDED_OPTS"
+export HBASE_REST_OPTS="$HBASE_REST_OPTS -Djava.security.auth.login.config={{rest_jaas_config_file}}"
+export PHOENIX_QUERYSERVER_OPTS="$PHOENIX_QUERYSERVER_OPTS -Djava.security.auth.login.config={{queryserver_jaas_config_file}}"
+{% else %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{java_io_tmpdir}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} $JDK_DEPENDED_OPTS"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} $JDK_DEPENDED_OPTS"
+{% endif %}
+
+# HBase off-heap MaxDirectMemorySize
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}"
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>


[49/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100755
index 0000000..31bf1c6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,88 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
+supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}
+{% if has_metric_collector %}
+
+*.period={{metrics_collection_period}}
+*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+*.sink.timeline.period={{metrics_collection_period}}
+*.sink.timeline.sendInterval={{metrics_report_interval}}000
+*.sink.timeline.slave.host.name={{hostname}}
+
+datanode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+namenode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+resourcemanager.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+nodemanager.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+historyserver.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+journalnode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+nimbus.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+supervisor.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+maptask.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+reducetask.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
+
+{% endif %}
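
For a concrete sense of how the {{...}} placeholders above resolve, here is a minimal render sketch in Python (assumes the jinja2 package; the host name, port, and intervals are invented sample values, not Ambari's real parameter set):

from jinja2 import Template

# A small excerpt of the timeline-sink section of the template above.
excerpt = Template("""
{% if has_metric_collector %}
*.period={{metrics_collection_period}}
*.sink.timeline.sendInterval={{metrics_report_interval}}000
datanode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
{% endif %}
""")

print(excerpt.render(
    has_metric_collector=True,
    metrics_collection_period=60,
    metrics_report_interval=60,  # seconds; the template appends 000 for milliseconds
    metric_collector_host="c6401.ambari.apache.org",  # hypothetical host
    metric_collector_port=6188,
))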

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/health_check.j2
new file mode 100755
index 0000000..0a03d17
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/health_check.j2
@@ -0,0 +1,81 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+# Run all checks
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0
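
One subtlety worth noting: each check_* function runs inside a command substitution, so its exit 0/exit 2 only terminates that subshell; the loop then reads $? to sort the message into the OK or ERROR bucket, and the outer script itself always exits 0. A rough sketch of how a caller might consume the output (the rendered-script path is an assumption; Ambari's agent logic is more involved):

import subprocess

# Hypothetical path where the rendered template lands on a host.
result = subprocess.run(["bash", "/etc/hadoop/conf/health_check"],
                        capture_output=True, text=True)

status_line = result.stdout.strip()
if status_line.startswith("ERROR"):
    print("host unhealthy:", status_line)
else:
    print("host healthy:", status_line)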

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/include_hosts_list.j2
new file mode 100755
index 0000000..4a9e713
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/topology_mappings.data.j2
new file mode 100755
index 0000000..15034d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/topology_mappings.data.j2
@@ -0,0 +1,24 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[network_topology]
+{% for host in all_hosts %}
+{% if host in slave_hosts %}
+{{host}}={{all_racks[loop.index-1]}}
+{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
+{% endif %}
+{% endfor %}
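
The loop.index-1 arithmetic above assumes all_racks and all_ipv4_ips are aligned index-for-index with all_hosts. A quick render sketch (jinja2 assumed; hosts, IPs, and racks are made up) shows the pairing:

from jinja2 import Template

tmpl = Template(
    "[network_topology]\n"
    "{% for host in all_hosts %}"
    "{% if host in slave_hosts %}"
    "{{host}}={{all_racks[loop.index-1]}}\n"
    "{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}\n"
    "{% endif %}"
    "{% endfor %}")

print(tmpl.render(
    all_hosts=["nn1.example.com", "dn1.example.com"],  # invented hosts
    slave_hosts=["dn1.example.com"],
    all_racks=["/default-rack", "/rack1"],
    all_ipv4_ips=["10.0.0.1", "10.0.0.2"],
))
# [network_topology]
# dn1.example.com=/rack1
# 10.0.0.2=/rack1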

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/kerberos.json
new file mode 100755
index 0000000..027c20b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/kerberos.json
@@ -0,0 +1,68 @@
+{
+  "properties": {
+    "realm": "${kerberos-env/realm}",
+    "keytab_dir": "/etc/security/keytabs"
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type" : "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    },
+    {
+      "name": "hdfs",
+      "principal": {
+        "value": "${hadoop-env/hdfs_user}@${realm}",
+        "type" : "user" ,
+        "configuration": "hadoop-env/hdfs_principal_name",
+        "local_username" : "${hadoop-env/hdfs_user}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/hdfs.headless.keytab",
+        "owner": {
+          "name": "${hadoop-env/hdfs_user}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "hadoop-env/hdfs_user_keytab"
+      }
+    },
+    {
+      "name": "smokeuser",
+      "principal": {
+        "value": "${cluster-env/smokeuser}@${realm}",
+        "type" : "user",
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username" : "${cluster-env/smokeuser}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "owner": {
+          "name": "${cluster-env/smokeuser}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "cluster-env/smokeuser_keytab"
+      }
+    }
+  ]
+}
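
The ${...} tokens in this descriptor are references into other configuration types, plus the local "realm" and "keytab_dir" properties defined at the top. A minimal sketch of the substitution idea (resolve_refs is a hypothetical helper, not Ambari's actual resolver):

import re

configs = {
    "realm": "EXAMPLE.COM",            # from kerberos-env/realm
    "hadoop-env/hdfs_user": "hdfs",
    "cluster-env/smokeuser": "ambari-qa",
}

def resolve_refs(value, configs):
    # Replace each ${key} token with its configured value; unknown keys kept as-is.
    return re.sub(r"\$\{([^}]+)\}",
                  lambda m: configs.get(m.group(1), m.group(0)), value)

print(resolve_refs("${hadoop-env/hdfs_user}@${realm}", configs))   # hdfs@EXAMPLE.COM
print(resolve_refs("${cluster-env/smokeuser}@${realm}", configs))  # ambari-qa@EXAMPLE.COM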

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/metainfo.xml
new file mode 100755
index 0000000..1f40439
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+      <active>false</active>
+    </versions>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
new file mode 100755
index 0000000..4627e73
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
@@ -0,0 +1,212 @@
+{
+  "stack_features": [
+    {
+      "name": "snappy",
+      "description": "Snappy compressor/decompressor support",
+      "max_version": "4.0.0.0"
+    },
+    {
+      "name": "lzo",
+      "description": "LZO libraries support",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "express_upgrade",
+      "description": "Express upgrade support",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "rolling_upgrade",
+      "description": "Rolling upgrade support",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "config_versioning",
+      "description": "Configurable versions support",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "datanode_non_root",
+      "description": "DataNode running as non-root support (AMBARI-7615)",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "remove_ranger_hdfs_plugin_env",
+      "description": "HDFS removes Ranger env files (AMBARI-14299)",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "ranger",
+      "description": "Ranger Service support",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "ranger_tagsync_component",
+      "description": "Ranger Tagsync component support (AMBARI-14383)",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "phoenix",
+      "description": "Phoenix Service support",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "nfs",
+      "description": "NFS support",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "timeline_state_store",
+      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+      "min_version": "4.0.0.0"
+    },
+    {
+      "name": "copy_tarball_to_hdfs",
+      "description": "Copy tarball to HDFS support (AMBARI-12113)",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "spark_16plus",
+      "description": "Spark 1.6+",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "spark_thriftserver",
+      "description": "Spark Thrift Server",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "create_kafka_broker_id",
+      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+      "min_version": "4.0.0.0",
+      "max_version": "4.2.0.0"
+    },
+    {
+      "name": "kafka_listeners",
+      "description": "Kafka listeners (AMBARI-10984)",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "kafka_kerberos",
+      "description": "Kafka Kerberos support (AMBARI-10984)",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "ranger_usersync_non_root",
+      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "ranger_audit_db_support",
+      "description": "Ranger Audit to DB support",
+      "min_version": "4.2.0.0",
+      "max_version": "4.2.0.0"
+    },
+    {
+      "name": "knox_versioned_data_dir",
+      "description": "Use versioned data dir for Knox (AMBARI-13164)",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "knox_sso_topology",
+      "description": "Knox SSO Topology support (AMBARI-13975)",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "oozie_admin_user",
+      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+      "min_version": "4.0.0.0"
+    },
+    {
+      "name": "oozie_setup_shared_lib",
+      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+      "min_version": "4.0.0.0"
+    },
+    {
+      "name": "oozie_host_kerberos",
+      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+      "min_version": "4.0.0.0",
+      "max_version": "4.1.0.0"
+    },
+    {
+      "name": "hive_metastore_upgrade_schema",
+      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+      "min_version": "4.1.0.0"
+     },
+    {
+      "name": "hive_server_interactive",
+      "description": "Hive server interactive support (AMBARI-15573)",
+      "min_version": "4.4.0.0"
+     },
+    {
+      "name": "hive_webhcat_specific_configs",
+      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+      "min_version": "4.1.0.0"
+     },
+    {
+      "name": "hive_purge_table",
+      "description": "Hive purge table support (AMBARI-12260)",
+      "min_version": "4.1.0.0"
+     },
+    {
+      "name": "hive_server2_kerberized_env",
+      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+      "max_version": "4.1.0.0"
+     },
+    {
+      "name": "hive_env_heapsize",
+      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+      "min_version": "4.0.0.0"
+    },
+    {
+      "name": "ranger_kms_hsm_support",
+      "description": "Ranger KMS HSM support (AMBARI-15752)",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_log4j_support",
+      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_kerberos_support",
+      "description": "Ranger Kerberos support",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "hive_metastore_site_support",
+      "description": "Hive Metastore site support",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_usersync_password_jceks",
+      "description": "Saving Ranger Usersync credentials in jceks",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_install_logsearch_client",
+      "description": "LogSearch Service support",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "hbase_home_directory",
+      "description": "Hbase home directory in HDFS needed for HBASE backup",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "spark_livy",
+      "description": "Livy as slave component of spark",
+      "min_version": "4.4.0.0"
+    },
+    {
+      "name": "ranger_pid_support",
+      "description": "Ranger Service support pid generation AMBARI-16756",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_kms_pid_support",
+      "description": "Ranger KMS Service support pid generation",
+      "min_version": "4.2.5.0"
+    }
+  ]
+}
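
Each entry gates a feature on the stack version: min_version is the first version that has the feature, max_version the first that no longer does. An illustrative check (not Ambari's real check_stack_feature; treating the upper bound as exclusive is an assumption):

def version_tuple(v):
    return tuple(int(part) for part in v.split("."))

def feature_supported(feature, stack_version):
    v = version_tuple(stack_version)
    lo, hi = feature.get("min_version"), feature.get("max_version")
    if lo is not None and v < version_tuple(lo):
        return False
    if hi is not None and v >= version_tuple(hi):  # max treated as exclusive
        return False
    return True

ranger = {"name": "ranger", "min_version": "4.2.0.0"}
print(feature_supported(ranger, "4.2.5.0"))  # True
print(feature_supported(ranger, "4.1.0.0"))  # False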

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
new file mode 100755
index 0000000..fdbbdf9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
@@ -0,0 +1,4 @@
+{
+  "stack_selector": ["iop-select", "/usr/bin/iop-select", "iop-select"],
+  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
+}
\ No newline at end of file
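
Each selector entry is a three-element list; reading it as (tool name, binary path, package name) is an assumption based on how iop-select and conf-select are typically invoked:

import json

with open("stack_tools.json") as f:  # local path is illustrative
    tools = json.load(f)

name, path, package = tools["stack_selector"]
print(name, "->", path)              # iop-select -> /usr/bin/iop-select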

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/repos/repoinfo.xml
new file mode 100755
index 0000000..034d528
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/repos/repoinfo.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <!-- TODO define latest json file for iop 
+  <latest>http://s3.amazonaws.com/dev.hortonworks.com/HDP/hdp_urlinfo.json</latest>
+  -->
+  <mainrepoid>IOP-4.0</mainrepoid>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/RHEL6/x86_64/4.0</baseurl>
+      <repoid>IOP-4.0</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/RHEL6/x86_64/1.0</baseurl>
+      <repoid>IOP-UTILS-1.0</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>
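
A short sketch (standard-library ElementTree; the local file name is assumed) of extracting the repo ids and base URLs, e.g. as input for templating yum .repo files:

import xml.etree.ElementTree as ET

root = ET.parse("repoinfo.xml").getroot()
for os_el in root.findall("os"):
    for repo in os_el.findall("repo"):
        print(os_el.get("family"),
              repo.findtext("repoid"),
              repo.findtext("baseurl"))
# redhat6 IOP-4.0 http://ibm-open-platform.ibm.com/repos/IOP/RHEL6/x86_64/4.0
# redhat6 IOP-UTILS-1.0 http://ibm-open-platform.ibm.com/repos/IOP-UTILS/RHEL6/x86_64/1.0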

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/role_command_order.json
new file mode 100755
index 0000000..bbf1905
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/role_command_order.json
@@ -0,0 +1,70 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
+    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+    "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START","ZOOKEEPER_SERVER-START"],
+    "WEBHCAT_SERVER-START": ["NODEMANAGER-START", "HIVE_SERVER-START"],
+    "WEBHCAT_SERVER-RESTART": ["NODEMANAGER-RESTART", "HIVE_SERVER-RESTART"],
+    "HIVE_METASTORE-START": ["MYSQL_SERVER-START", "NAMENODE-START"],
+    "HIVE_METASTORE-RESTART": ["MYSQL_SERVER-RESTART", "NAMENODE-RESTART"],
+    "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
+    "HIVE_SERVER-RESTART": ["NODEMANAGER-RESTART", "MYSQL_SERVER-RESTART"],
+    "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
+    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
+    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
+    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
+    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+    "SLIDER_SERVICE_CHECK-SERVICE_CHECK" : ["NODEMANAGER-START", "RESOURCEMANAGER-START"],    
+    "SPARK_SERVICE_CHECK-SERVICE_CHECK" : ["SPARK_JOBHISTORYSERVER-START", "SPARK_THRIFTSERVER-START", "APP_TIMELINE_SERVER-START"],
+    "FLUME_SERVICE_CHECK-SERVICE_CHECK": ["FLUME_HANDLER-START"]
+  },
+  "_comment" : "GLUSTERFS-specific dependencies",
+  "optional_glusterfs": {
+    "HBASE_MASTER-START": ["PEERSTATUS-START"],
+    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"]
+  },
+  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+  "optional_no_glusterfs": {
+    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
+    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HIVE_SERVER-START": ["DATANODE-START"],
+    "WEBHCAT_SERVER-START": ["DATANODE-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
+        "SECONDARY_NAMENODE-START"],
+    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
+        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
+        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
+        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "SPARK_JOBHISTORYSERVER-START" : ["NAMENODE-START"],
+    "SPARK_THRIFTSERVER-START" : ["NAMENODE-START", "HIVE_METASTORE-START"],
+    "APP_TIMELINE_SERVER-START": ["NAMENODE-START", "DATANODE-START"]
+  },
+  "_comment" : "Dependencies that are used in HA NameNode cluster",
+  "namenode_optional_ha": {
+    "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
+  },
+  "_comment" : "Dependencies that are used in ResourceManager HA cluster",
+  "resourcemanager_optional_ha" : {
+    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
+  }
+}
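
The record format maps each blocked command to the commands that must run first, so a start order falls out of a plain topological sort. A minimal sketch over two of the entries above (assumes the dependency graph is acyclic; Ambari's real scheduler is more elaborate):

deps = {
    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
}

def start_order(deps):
    order, seen = [], set()
    def visit(cmd):
        if cmd in seen:
            return
        seen.add(cmd)
        for blocker in deps.get(cmd, []):  # blockers run before cmd
            visit(blocker)
        order.append(cmd)
    for cmd in deps:
        visit(cmd)
    return order

print(start_order(deps))
# ['ZOOKEEPER_SERVER-START', 'HBASE_MASTER-START', 'HBASE_REGIONSERVER-START']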

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json
new file mode 100755
index 0000000..221e585
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json
@@ -0,0 +1,183 @@
+{
+  "AMBARI_METRICS": {
+    "service": [
+      {
+        "name": "metrics_monitor_process_percent",
+        "label": "Percent Metrics Monitors Available",
+        "description": "This alert is triggered if a percentage of Metrics Monitor processes are not up and listening on the network for the configured warning and critical thresholds.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "ams_metrics_monitor_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.1
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.3
+            }
+          }
+        }
+      }
+    ],
+    "METRICS_COLLECTOR": [
+      {
+        "name": "ams_metrics_collector_autostart",
+        "label": "Metrics Collector - Auto-Restart Status",
+        "description": "This alert is triggered if the Metrics Collector has been restarted automatically too frequently in last one hour. By default, a Warning alert is triggered if restarted twice in one hour and a Critical alert is triggered if restarted 4 or more times in one hour.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "RECOVERY",
+          "reporting": {
+            "ok": {
+              "text": "Metrics Collector has not been auto-started and is running normally{0}."
+            },
+            "warning": {
+              "text": "Metrics Collector has been auto-started {1} times{0}.",
+              "count": 2
+            },
+            "critical": {
+              "text": "Metrics Collector has been auto-started {1} times{0}.",
+              "count": 4
+            }
+          }
+        }
+      },
+      {
+        "name": "ams_metrics_collector_process",
+        "label": "Metrics Collector Process",
+        "description": "This alert is triggered if the Metrics Collector cannot be confirmed to be up and listening on the configured port for number of seconds equal to threshold.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{ams-site/timeline.metrics.service.webapp.address}}",
+          "default_port": 6188,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "ams_metrics_collector_hbase_master_process",
+        "label": "Metrics Collector - HBase Master Process",
+        "description": "This alert is triggered if the Metrics Collector's HBase master processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{ams-hbase-site/hbase.master.info.port}}",
+          "default_port": 61310,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "ams_metrics_collector_hbase_master_cpu",
+        "label": "Metrics Collector - HBase Master CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the Metrics Collector's HBase Master exceeds certain warning and critical thresholds. It checks the HBase Master JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{ams-hbase-site/hbase.master.info.port}}",
+            "default_port": 61310,
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
+      {
+        "name": "ams_metrics_collector_zookeeper_server_process",
+        "label": "Metrics Collector - ZooKeeper Server Process",
+        "description": "This host-level alert is triggered if the Metrics Collector's ZooKeeper server process cannot be determined to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{ams-hbase-site/hbase.zookeeper.property.clientPort}}",
+          "default_port": 61181,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "METRICS_MONITOR": [
+      {
+        "name": "ams_metrics_monitor_process",
+        "label": "Metrics Monitor Status",
+        "description": "This alert indicates the status of the Metrics Monitor process as determined by the monitor status script.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "AMBARI_METRICS/0.1.0.4.1/package/alerts/alert_ambari_metrics_monitor.py"
+        }
+      }
+    ]
+  }
+}
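
For the PORT-type alerts above, the warning and critical "value" fields are response-time thresholds in seconds. An illustrative evaluation (hypothetical helper, not Ambari's alert framework):

import socket
import time

def check_port(host, port, warning=1.5, critical=5.0):
    start = time.time()
    try:
        with socket.create_connection((host, port), timeout=critical):
            elapsed = time.time() - start
    except OSError as err:
        return "CRITICAL", "Connection failed: {0} to {1}:{2}".format(err, host, port)
    state = "WARNING" if elapsed >= warning else "OK"
    return state, "TCP OK - {0:.3f}s response on port {1}".format(elapsed, port)

print(check_port("localhost", 6188))  # 6188 is the collector's default_port above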

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml
new file mode 100755
index 0000000..5910253
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml
@@ -0,0 +1,107 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<configuration>
+  <property>
+    <name>ambari_metrics_user</name>
+    <display-name>Ambari Metrics User</display-name>
+    <value>ams</value>
+    <property-type>USER</property-type>
+    <description>Ambari Metrics User Name.</description>
+  </property>
+  <property>
+    <name>metrics_collector_log_dir</name>
+    <value>/var/log/ambari-metrics-collector</value>
+    <display-name>Metrics Collector log dir</display-name>
+    <description>Collector log directory.</description>
+  </property>
+  <property>
+    <name>metrics_collector_pid_dir</name>
+    <value>/var/run/ambari-metrics-collector</value>
+    <display-name>Metrics Collector pid dir</display-name>
+    <description>Collector pid directory.</description>
+  </property>
+  <property>
+    <name>metrics_monitor_pid_dir</name>
+    <value>/var/run/ambari-metrics-monitor</value>
+    <display-name>Metrics Monitor pid dir</display-name>
+    <description>Monitor pid directory.</description>
+  </property>
+  <property>
+    <name>metrics_monitor_log_dir</name>
+    <value>/var/log/ambari-metrics-monitor</value>
+    <display-name>Metrics Monitor log dir</display-name>
+    <description>Monitor log directory.</description>
+  </property>
+  <property>
+    <name>metrics_collector_heapsize</name>
+    <value>512</value>
+    <description>Metrics Collector Heap Size</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+  </property>
+  <property>
+    <name>content</name>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# Collector Log directory for log4j
+export AMS_COLLECTOR_LOG_DIR={{ams_collector_log_dir}}
+
+# Monitor Log directory for outfile
+export AMS_MONITOR_LOG_DIR={{ams_monitor_log_dir}}
+
+# Collector pid directory
+export AMS_COLLECTOR_PID_DIR={{ams_collector_pid_dir}}
+
+# Monitor pid directory
+export AMS_MONITOR_PID_DIR={{ams_monitor_pid_dir}}
+
+# AMS HBase pid directory
+export AMS_HBASE_PID_DIR={{hbase_pid_dir}}
+
+# AMS Collector heapsize
+export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}
+
+# HBase normalizer enabled
+export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}
+
+# HBase compaction policy enabled
+export AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}
+
+# AMS Collector options
+export AMS_COLLECTOR_OPTS="-Djava.library.path=/usr/lib/ams-hbase/lib/hadoop-native"
+{% if security_enabled %}
+export AMS_COLLECTOR_OPTS="$AMS_COLLECTOR_OPTS -Djava.security.auth.login.config={{ams_collector_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
+{% endif %}
+
+# AMS Collector GC options
+export AMS_COLLECTOR_GC_OPTS="-XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ams_collector_log_dir}}/collector-gc.log-`date +'%Y%m%d%H%M'`"
+export AMS_COLLECTOR_OPTS="$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS"
+
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-env.xml
new file mode 100755
index 0000000..379297f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-env.xml
@@ -0,0 +1,234 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hbase_log_dir</name>
+    <value>/var/log/ambari-metrics-collector</value>
+    <description>Log Directories for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <value>/var/run/ambari-metrics-collector/</value>
+    <description>Pid Directory for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_classpath_additional</name>
+    <value></value>
+    <description>Additional directory or jar in classpath for HBase.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase_regionserver_heapsize</name>
+    <value>1024</value>
+    <description>
+        HBase RegionServer Heap Size. In embedded mode, the total heap size is
+        the sum of the master and regionserver heap sizes.
+    </description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.cluster.distributed</name>
+      </property>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.rootdir</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>regionserver_xmn_size</name>
+    <value>256</value>
+    <description>HBase RegionServer maximum value for young generation heap size.</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.cluster.distributed</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hbase_master_xmn_size</name>
+    <value>256</value>
+    <description>
+      HBase Master maximum value for young generation heap size.
+    </description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.cluster.distributed</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hbase_master_maxperm_size</name>
+    <value>128</value>
+    <description>HBase Master maximum value for perm heap size.</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_ratio</name>
+    <value>0.2</value>
+    <description>Percentage of max heap size (-Xmx) which is used for young generation heap (-Xmn).</description>
+  </property>
+  <property>
+    <name>hbase_master_heapsize</name>
+    <value>1024</value>
+    <description>
+        HBase Master Heap Size. In embedded mode, the total heap size is
+        the sum of the master and regionserver heap sizes.
+    </description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.cluster.distributed</name>
+      </property>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.rootdir</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>max_open_files_limit</name>
+    <value>32768</value>
+    <description>
+        The maximum number of open file descriptors per process
+    </description>
+  </property>
+
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for the hbase-env.sh file</description>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6+ required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+additional_cp={{hbase_classpath_additional}}
+if [  -n "$additional_cp" ];
+then
+  export HBASE_CLASSPATH=${HBASE_CLASSPATH}:$additional_cp
+else
+  export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+fi
+
+# The maximum amount of heap to use for hbase shell.
+export HBASE_SHELL_OPTS="-Xmx256m"
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{hbase_log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{hbase_tmp_dir}}"
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{hbase_log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+
+{% if java_version &lt; 8 %}
+export HBASE_MASTER_OPTS=" -XX:PermSize=64m -XX:MaxPermSize={{hbase_master_maxperm_size}} -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+export HBASE_REGIONSERVER_OPTS="-XX:MaxPermSize=128m -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+{% else %}
+export HBASE_MASTER_OPTS=" -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+export HBASE_REGIONSERVER_OPTS=" -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+{% endif %}
+
+
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{hbase_log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{hbase_pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
+export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Djava.security.auth.login.config={{ams_zookeeper_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
+{% endif %}
+
+# use embedded native libs
+_HADOOP_NATIVE_LIB="/usr/lib/ams-hbase/lib/hadoop-native/"
+export HBASE_OPTS="$HBASE_OPTS -Djava.library.path=${_HADOOP_NATIVE_LIB}"
+
+# Unset HADOOP_HOME to avoid importing HADOOP installed cluster related configs
+export HADOOP_HOME={{ams_hbase_home_dir}}
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-log4j.xml
new file mode 100755
index 0000000..52ead29
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-log4j.xml
@@ -0,0 +1,146 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.security.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Rolling File Appender properties
+hbase.log.maxfilesize=256MB
+hbase.log.maxbackupindex=20
+
+# Rolling File Appender
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+#
+# Security audit appender
+#
+hbase.security.log.file=SecurityAuth.audit
+hbase.security.log.maxfilesize=256MB
+hbase.security.log.maxbackupindex=20
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
+log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.category.SecurityLogger=${hbase.security.logger}
+log4j.additivity.SecurityLogger=false
+#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
+
+#
+# Null Appender
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=INFO
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.hbase=INFO
+# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
+#log4j.logger.org.apache.hadoop.dfs=DEBUG
+# Set this class to log INFO only, otherwise it's OTT
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+
+
+# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
+#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
+
+# Uncomment the below if you want to remove logging of client region caching
+# and scan of .META. messages
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
+# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
+
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-policy.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-policy.xml
new file mode 100755
index 0000000..febbd44
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-policy.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HRegionInterface protocol implementations (i.e.,
+      clients talking to HRegionServers).
+      The ACL is a comma-separated list of user and group names. The user and
+      group lists are separated by a blank, e.g. "alice,bob users,wheel".
+      A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterInterface protocol implementation (i.e.,
+      clients talking to HMaster for admin operations).
+      The ACL is a comma-separated list of user and group names. The user and
+      group lists are separated by a blank, e.g. "alice,bob users,wheel".
+      A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.masterregion.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterRegionInterface protocol implementations
+      (for HRegionServers communicating with HMaster).
+      The ACL is a comma-separated list of user and group names. The user and
+      group list is separated by a blank. For example, "alice,bob users,wheel".
+      A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>
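
The ACL grammar used by all three properties above (users, then a
blank, then groups; "*" means everyone) is easy to get wrong, so here
is a minimal Python sketch of it. This only illustrates the documented
syntax; the authoritative parser is Hadoop's own ACL handling.

    def parse_acl(acl):
        """Parse 'user1,user2 group1,group2' style ACL strings."""
        acl = acl.strip()
        if acl == "*":
            return {"users": ["*"], "groups": ["*"]}
        parts = acl.split(" ", 1)
        users = [u for u in parts[0].split(",") if u]
        groups = [g for g in parts[1].split(",") if g] if len(parts) > 1 else []
        return {"users": users, "groups": groups}

    print(parse_acl("alice,bob users,wheel"))
    # {'users': ['alice', 'bob'], 'groups': ['users', 'wheel']}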

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-security-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-security-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-security-site.xml
new file mode 100755
index 0000000..5e7bc518
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-security-site.xml
@@ -0,0 +1,149 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ams.zookeeper.keytab</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>ams.zookeeper.principal</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.master.kerberos.principal</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.master.keytab.file</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.myclient.keytab</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.myclient.principal</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.regionserver.kerberos.principal</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.regionserver.keytab.file</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.security.authentication</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.security.authorization</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.authProvider.1</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.jaasLoginRenew</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.kerberos.removeHostFromPrincipal</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.kerberos.removeRealmFromPrincipal</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+</configuration>
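
Every property in this file is an intentionally blank placeholder: the
<empty-value-valid> attribute tells Ambari to accept the empty value
until security setup (typically the Kerberos wizard) supplies real
values. A minimal sketch that lists such placeholders from a stack
config file; the filename here is illustrative:

    import xml.etree.ElementTree as ET

    tree = ET.parse("ams-hbase-security-site.xml")
    for prop in tree.getroot().findall("property"):
        va = prop.find("value-attributes")
        if va is not None and va.findtext("empty-value-valid") == "true":
            print(prop.findtext("name"))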

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
new file mode 100755
index 0000000..00d0a2c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
@@ -0,0 +1,384 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>file:///var/lib/ambari-metrics-collector/hbase</value>
+    <description>
+      The Ambari Metrics service uses HBase as its default storage backend. Set
+      the HBase rootdir either to a local filesystem path, when running Ambari
+      Metrics in embedded mode, or to an HDFS directory, for example:
+      hdfs://namenode.example.org:8020/amshbase.
+    </description>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/var/lib/ambari-metrics-collector/hbase-tmp</value>
+    <description>
+      Temporary directory on the local filesystem.
+      Change this setting to point to a location more permanent
+      than '/tmp' (The '/tmp' directory is often cleared on
+      machine restart).
+    </description>
+  </property>
+  <property>
+    <name>hbase.local.dir</name>
+    <value>${hbase.tmp.dir}/local</value>
+    <description>Directory on the local filesystem to be used as local storage
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>false</value>
+    <description>
+      The mode the cluster will be in. Possible values are false for
+      standalone mode and true for distributed mode. If false, startup will run
+      all HBase and ZooKeeper daemons together in a single JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.wait.on.regionservers.mintostart</name>
+    <value>1</value>
+    <description>
+      The number of region servers the HBase Master waits for before starting.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>{{zookeeper_quorum_hosts}}</value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+      By default this is set to localhost for local and pseudo-distributed modes
+      of operation. For a fully-distributed setup, this should be set to a full
+      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+      this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+    <final>true</final>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value>0.0.0.0</value>
+    <description>The bind address for the HBase Master web UI</description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>61310</value>
+    <description>The port for the HBase Master web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>61330</value>
+    <description>The port for the HBase RegionServer web UI.</description>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>61300</value>
+    <description>The port the HBase Master binds to for RPC (not the web UI).</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.port</name>
+    <value>61320</value>
+    <description>The port the HBase RegionServer binds to for RPC (not the web UI).</description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>0</value>
+    <description>
+      The time (in milliseconds) between 'major' compactions of all
+      HStoreFiles in a region.
+      0 to disable automated major compactions.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.query.spoolThresholdBytes</name>
+    <value>12582912</value>
+    <description>
+      Threshold size in bytes after which results from parallel query
+      execution are spooled to disk. Default is 20 MB.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.dataDir</name>
+    <value>${hbase.tmp.dir}/zookeeper</value>
+    <description>
+      Property from ZooKeeper's config zoo.cfg.
+      The directory where the snapshot is stored.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>10000</value>
+    <description>
+      Number of rows that will be fetched when calling next on a scanner
+      if it is not served from (local, client) memory.
+    </description>
+  </property>
+  <property>
+    <name>hbase.normalizer.enabled</name>
+    <value>true</value>
+    <description>If set to true, the Master will try to keep region sizes
+    within each table approximately the same.</description>
+  </property>
+  <property>
+    <name>hbase.normalizer.period</name>
+    <value>600000</value>
+    <description>Period in ms at which the region normalizer runs in the Master.</description>
+  </property>
+  <property>
+    <name>hbase.master.normalizer.class</name>
+    <value>org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer</value>
+    <description>
+      Class used to execute the region normalization when the period occurs.
+      See the class comment for more on how it works
+      http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
+    </description>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.3</value>
+    <description>
+      Percentage of maximum heap (-Xmx setting) to allocate to the block cache
+      used by StoreFiles. The HBase default of 0.4 means allocate 40%; this
+      file sets 0.3 (30%).
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.5</value>
+    <description>
+      Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.4</value>
+    <description>
+      When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+      This value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.groupby.maxCacheSize</name>
+    <value>307200000</value>
+    <description>
+      Size in bytes of pages cached during GROUP BY spilling. Default is 100Mb.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>4294967296</value>
+    <description>
+      Maximum HFile size. If the sum of the sizes of a region’s HFiles has grown
+      to exceed this value, the region is split in two. Default is 10 GB.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>4</value>
+    <description>
+      Block updates if memstore has hbase.hregion.memstore.block.multiplier
+      times hbase.hregion.memstore.flush.size bytes. Useful for preventing
+      runaway memstores during spikes in update traffic.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.flusher.count</name>
+    <value>2</value>
+    <description>
+      The number of flush threads. With fewer threads, the MemStore flushes
+      will be queued. With more threads, the flushes will be executed in parallel,
+      increasing the load on HDFS, and potentially causing more compactions.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.query.timeoutMs</name>
+    <value>1200000</value>
+    <description>
+      Number of milliseconds after which a query will time out on the client.
+      Default is 10 min.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.timeout.period</name>
+    <value>900000</value>
+    <description>
+      Client scanner lease period in milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.thread.compaction.large</name>
+    <value>2</value>
+    <description>
+      Configuration key for the large compaction threads.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.thread.compaction.small</name>
+    <value>3</value>
+    <description>
+      Configuration key for the small compaction threads.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>61181</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.peerport</name>
+    <value>61288</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.leaderport</name>
+    <value>61388</value>
+  </property>
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <value>200</value>
+    <description>
+      If more than this number of StoreFiles exist in any one Store
+      (one StoreFile is written per flush of MemStore), updates are blocked for
+      this region until a compaction is completed, or until
+      hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value>134217728</value>
+    <description>
+      Memstore will be flushed to disk if the size of the memstore exceeds this
+      number of bytes. Value is checked by a thread that runs every
+      hbase.server.thread.wakefrequency.
+    </description>
+  </property>
+  <property>
+    <name>hbase.snapshot.enabled</name>
+    <value>false</value>
+    <description>Enable/Disable HBase snapshots.</description>
+  </property>
+  <property>
+    <name>hbase.replication</name>
+    <value>false</value>
+    <description>Enable/Disable HBase replication.</description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>120000</value>
+    <description>ZooKeeper session timeout in milliseconds.</description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout.localHBaseCluster</name>
+    <value>120000</value>
+    <description>
+      ZooKeeper session timeout in milliseconds for
+      pseudo distributed mode.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.sequence.saltBuckets</name>
+    <value>2</value>
+    <description>
+      Controls the number of pre-allocated regions for the SYSTEM.SEQUENCE table.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.query.maxGlobalMemoryPercentage</name>
+    <value>15</value>
+    <description>
+      Percentage of total heap memory (i.e. Runtime.getRuntime().maxMemory())
+      that all threads may use.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.spool.directory</name>
+    <value>${hbase.tmp.dir}/phoenix-spool</value>
+    <description>
+      Set directory for Phoenix spill files. If possible set this to a
+      different mount point from the one for hbase.rootdir in embedded mode.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.mutate.batchSize</name>
+    <value>10000</value>
+    <description>
+      The number of rows that are batched together and automatically committed
+      during the execution of an UPSERT SELECT or DELETE statement.
+      This affects performance of group by aggregators if they are being used.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.query.rowKeyOrderSaltedTable</name>
+    <value>true</value>
+    <description>
+      When set, user-specified split points are disallowed on salted tables to
+      ensure that each bucket only contains entries with the same salt byte.
+      When this property is turned on, a salted table behaves just like a
+      normal table and returns items in row-key order for scans.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.coprocessor.maxServerCacheTimeToLiveMs</name>
+    <value>60000</value>
+    <description>
+      Maximum time to live (in milliseconds) for server caches. A cache entry
+      expires after this amount of time has passed since its last access.
+      Consider increasing this parameter when a server-side IOException
+      (“Could not find hash cache for joinId”) occurs. Warnings like
+      “Earlier hash cache(s) might have expired on servers” can also be a
+      sign that this number should be increased.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.coprocessor.maxMetaDataCacheSize</name>
+    <value>20480000</value>
+    <description>
+      Max size in bytes of total server-side metadata cache after which
+      evictions will begin to occur based on least recent access time.
+      Default is 20 MB.
+    </description>
+  </property>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>Enable/Disable short-circuit reads for the client.
+      Hadoop servers must be configured to allow short-circuit reads
+      for the hbase user for this to take effect.
+    </description>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.cluster.distributed</name>
+      </property>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.rootdir</name>
+      </property>
+    </depends-on>
+  </property>
+
+</configuration>
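
Back-of-the-envelope arithmetic for the memory settings above, for an
assumed 1 GB collector heap (the heap size itself is configured
elsewhere, e.g. in ams-env, and is an assumption in this sketch):

    heap = 1024 * 1024 * 1024          # assumed collector -Xmx
    mb = 1024 * 1024

    upper = 0.5 * heap                 # ...global.memstore.upperLimit
    lower = 0.4 * heap                 # ...global.memstore.lowerLimit
    block_cache = 0.3 * heap           # hfile.block.cache.size
    flush_size = 134217728             # hbase.hregion.memstore.flush.size
    region_block = 4 * flush_size      # ...memstore.block.multiplier

    print(f"writes block at {upper / mb:.0f} MB of global memstore, "
          f"then flush down to {lower / mb:.0f} MB")
    print(f"block cache: {block_cache / mb:.0f} MB; a single region "
          f"blocks updates at {region_block / mb:.0f} MB")
    # upperLimit + block cache = 0.8 of heap, the commonly cited
    # ceiling for the two combined.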

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-log4j.xml
new file mode 100755
index 0000000..2b0a4cf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-log4j.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Define some default values that can be overridden by system properties
+ams.log.dir=.
+ams.log.file=ambari-metrics-collector.log
+
+# Root logger option
+log4j.rootLogger=INFO,file
+
+# Direct log messages to a log file
+log4j.appender.file=org.apache.log4j.RollingFileAppender
+log4j.appender.file.File=${ams.log.dir}/${ams.log.file}
+log4j.appender.file.MaxFileSize=80MB
+log4j.appender.file.MaxBackupIndex=60
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>
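
A quick sanity check on the disk budget implied by the appender above:
a RollingFileAppender keeps the live file plus MaxBackupIndex rolled
backups, each capped at MaxFileSize.

    max_file_mb = 80                     # log4j.appender.file.MaxFileSize
    backups = 60                         # log4j.appender.file.MaxBackupIndex

    worst_case_mb = (backups + 1) * max_file_mb
    print(f"up to {worst_case_mb} MB (~{worst_case_mb / 1024:.1f} GB) "
          f"of collector logs under ams.log.dir")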


[22/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-env.xml
new file mode 100755
index 0000000..f4b161e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-env.xml
@@ -0,0 +1,189 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>oozie_user</name>
+    <display-name>Oozie User</display-name>
+    <value>oozie</value>
+    <property-type>USER</property-type>
+    <description>Oozie User.</description>
+  </property>
+  <property>
+    <name>oozie_admin_users</name>
+    <value>{oozie_user}, oozie-admin</value>
+    <description>Oozie admin users.</description>
+  </property>
+  <property>
+    <name>oozie_database</name>
+    <display-name>Oozie Database</display-name>
+    <value>New Derby Database</value>
+    <description>Oozie Server Database.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>oozie_derby_database</name>
+    <value>Derby</value>
+    <description>Oozie Derby Database</description>
+  </property>
+  <property>
+    <name>oozie_data_dir</name>
+    <display-name>Oozie Data Dir</display-name>
+    <value>/hadoop/oozie/data</value>
+    <description>Data directory in which the Oozie DB exists</description>
+    <value-attributes>
+      <type>directory</type>
+      <empty-value-valid>true</empty-value-valid>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>oozie_log_dir</name>
+    <display-name>Oozie Log Dir</display-name>
+    <value>/var/log/oozie</value>
+    <description>Directory for oozie logs</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>oozie_pid_dir</name>
+    <display-name>Oozie PID Dir</display-name>
+    <value>/var/run/oozie</value>
+    <description>Directory in which the pid files for oozie reside.</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>oozie_admin_port</name>
+    <display-name>Oozie Server Admin Port</display-name>
+    <value>11001</value>
+    <description>The admin port the Oozie server listens on.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>oozie_initial_heapsize</name>
+    <value>1024</value>
+    <description>Oozie initial heap size.</description>
+  </property>
+  <property>
+    <name>oozie_heapsize</name>
+    <value>2048</value>
+    <description>Oozie heap size.</description>
+  </property>
+  <property>
+    <name>oozie_permsize</name>
+    <value>256</value>
+    <description>Oozie permanent generation size.</description>
+  </property>
+
+
+  <!-- oozie-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for oozie-env.sh file</description>
+    <value>
+#!/bin/bash
+
+if [ -d "/usr/lib/bigtop-tomcat" ]; then
+  export OOZIE_CONFIG=${OOZIE_CONFIG:-{{conf_dir}}}
+  export CATALINA_BASE={{oozie_server_dir}}
+  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}
+  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
+fi
+
+# Set JAVA_HOME
+export JAVA_HOME={{java_home}}
+
+export JRE_HOME=${JAVA_HOME}
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+# This is needed so that Oozie does not run into OOM or GC Overhead limit
+# exceeded exceptions. If the Oozie server is handling a large number of
+# workflows/coordinator jobs, the memory settings may need to be revised.
+
+{% if java_version &lt; 8 %}
+export CATALINA_OPTS="$CATALINA_OPTS -Xms{{oozie_initial_heapsize}} -Xmx{{oozie_heapsize}} -XX:MaxPermSize={{oozie_permsize}}"
+{% else %}
+export CATALINA_OPTS="$CATALINA_OPTS -Xms{{oozie_initial_heapsize}} -Xmx{{oozie_heapsize}} -XX:MaxMetaspaceSize={{oozie_permsize}}"
+{% endif %}
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG={{oozie_log_dir}}
+
+# Oozie pid directory
+#
+export CATALINA_PID={{pid_file}}
+
+# Location of the data for Oozie
+export OOZIE_DATA={{oozie_data_dir}}
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port Oozie server runs
+#
+export OOZIE_HTTP_PORT={{oozie_server_port}}
+
+# The admin port Oozie server runs
+#
+export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
+
+# The host name Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native
+
+# At least 1 minute of retry time to account for server downtime during
+# upgrade/downgrade
+export OOZIE_CLIENT_OPTS="${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5 "
+
+    </value>
+  </property>
+
+</configuration>
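
The {% if java_version < 8 %} switch in the template above exists
because JDK 8 dropped the permanent generation: -XX:MaxPermSize is
gone and -XX:MaxMetaspaceSize takes its place. A minimal sketch of how
that branch resolves, using the jinja2 package (the heap values are
illustrative, not the rendered defaults):

    from jinja2 import Template

    tmpl = Template(
        "{% if java_version < 8 %}"
        "-Xms{{ oozie_initial_heapsize }} -Xmx{{ oozie_heapsize }} "
        "-XX:MaxPermSize={{ oozie_permsize }}"
        "{% else %}"
        "-Xms{{ oozie_initial_heapsize }} -Xmx{{ oozie_heapsize }} "
        "-XX:MaxMetaspaceSize={{ oozie_permsize }}"
        "{% endif %}"
    )
    for jv in (7, 8):
        print(jv, tmpl.render(java_version=jv,
                              oozie_initial_heapsize="1024m",
                              oozie_heapsize="2048m",
                              oozie_permsize="256m"))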

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-log4j.xml
new file mode 100755
index 0000000..2ca87c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-log4j.xml
@@ -0,0 +1,146 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+      #
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements.  See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership.  The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License.  You may obtain a copy of the License at
+      #
+      #      http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing, software
+      # distributed under the License is distributed on an "AS IS" BASIS,
+      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      # See the License for the specific language governing permissions and
+      # limitations under the License.
+      #
+
+
+      # If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+      # XLogService sets its value to '${oozie.home}/logs'
+
+      # The appender that Oozie uses must be named 'oozie' (i.e. log4j.appender.oozie)
+
+      # Using the RollingFileAppender with the OozieRollingPolicy will roll the log file every hour and retain up to MaxHistory number of
+      # log files. If FileNamePattern ends with ".gz" it will create gzip files.
+      log4j.appender.oozie=org.apache.log4j.rolling.RollingFileAppender
+      log4j.appender.oozie.RollingPolicy=org.apache.oozie.util.OozieRollingPolicy
+      log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+      log4j.appender.oozie.Append=true
+      log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+      log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+      # The FileNamePattern must end with "-%d{yyyy-MM-dd-HH}.gz" or "-%d{yyyy-MM-dd-HH}" and also start with the
+      # value of log4j.appender.oozie.File
+      log4j.appender.oozie.RollingPolicy.FileNamePattern=${log4j.appender.oozie.File}-%d{yyyy-MM-dd-HH}
+      # The MaxHistory controls how many log files will be retained (6480 hours / 24 hours per day = 270 days); -1 to disable
+      log4j.appender.oozie.RollingPolicy.MaxHistory=6480
+
+
+
+      log4j.appender.oozieError=org.apache.log4j.rolling.RollingFileAppender
+      log4j.appender.oozieError.RollingPolicy=org.apache.oozie.util.OozieRollingPolicy
+      log4j.appender.oozieError.File=${oozie.log.dir}/oozie-error.log
+      log4j.appender.oozieError.Append=true
+      log4j.appender.oozieError.layout=org.apache.log4j.PatternLayout
+      log4j.appender.oozieError.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+      # The FileNamePattern must end with "-%d{yyyy-MM-dd-HH}.gz" or "-%d{yyyy-MM-dd-HH}" and also start with the
+      # value of log4j.appender.oozieError.File
+      log4j.appender.oozieError.RollingPolicy.FileNamePattern=${log4j.appender.oozieError.File}-%d{yyyy-MM-dd-HH}
+      # The MaxHistory controls how many log files will be retained (6480 hours / 24 hours per day = 270 days); -1 to disable
+      log4j.appender.oozieError.RollingPolicy.MaxHistory=6480
+      log4j.appender.oozieError.filter.1 = org.apache.log4j.varia.LevelMatchFilter
+      log4j.appender.oozieError.filter.1.levelToMatch = WARN
+      log4j.appender.oozieError.filter.2 = org.apache.log4j.varia.LevelMatchFilter
+      log4j.appender.oozieError.filter.2.levelToMatch = ERROR
+      log4j.appender.oozieError.filter.3 = org.apache.log4j.varia.LevelMatchFilter
+      log4j.appender.oozieError.filter.3.levelToMatch = FATAL
+      log4j.appender.oozieError.filter.4 = org.apache.log4j.varia.DenyAllFilter
+
+
+
+      # Uncomment the below two lines to use the DailyRollingFileAppender instead
+      # The DatePattern must end with either "dd" or "HH"
+      #log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+      #log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
+
+      log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+      log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+      log4j.appender.oozieops.Append=true
+      log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+      log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+      log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+      log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+      log4j.appender.oozieinstrumentation.Append=true
+      log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+      log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+      log4j.appender.oozieaudit=org.apache.log4j.rolling.RollingFileAppender
+      log4j.appender.oozieaudit.RollingPolicy=org.apache.oozie.util.OozieRollingPolicy
+      log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+      log4j.appender.oozieaudit.Append=true
+      log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+      log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+      log4j.appender.oozieaudit.RollingPolicy.FileNamePattern=${log4j.appender.oozieaudit.File}.%d{yyyy-MM-dd}
+      log4j.appender.oozieaudit.RollingPolicy.MaxHistory=6480
+
+
+      log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+      log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+      log4j.appender.openjpa.Append=true
+      log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+      log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+      log4j.logger.openjpa=INFO, openjpa
+      log4j.logger.oozieops=INFO, oozieops
+      log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+      log4j.logger.oozieaudit=ALL, oozieaudit
+      log4j.logger.org.apache.oozie=INFO, oozie, oozieError
+      log4j.logger.org.apache.hadoop=WARN, oozie
+      log4j.logger.org.mortbay=WARN, oozie
+      log4j.logger.org.hsqldb=WARN, oozie
+      log4j.logger.org.apache.hadoop.security.authentication.server=WARN, oozie
+    </value>
+  </property>
+
+</configuration>
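
The arithmetic behind MaxHistory=6480 in the comments above: the Oozie
rolling policy rolls hourly, so retention in days is simply the hourly
file count divided by 24.

    def retention_days(max_history_hours):
        return max_history_hours / 24

    print(retention_days(6480))  # 270.0 days of hourly oozie.log files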

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-site.xml
new file mode 100755
index 0000000..1cd7162
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/configuration/oozie-site.xml
@@ -0,0 +1,382 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+  <!--
+      Refer to the oozie-default.xml file for the complete list of
+      Oozie configuration properties and their default values.
+  -->
+  <property>
+    <name>oozie.base.url</name>
+    <value>http://localhost:11000/oozie</value>
+    <description>Base Oozie URL.</description>
+  </property>
+
+  <property>
+    <name>oozie.system.id</name>
+    <value>oozie-${user.name}</value>
+    <description>
+      The Oozie system ID.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.systemmode</name>
+    <value>NORMAL</value>
+    <description>
+      System mode for Oozie at startup.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.AuthorizationService.security.enabled</name>
+    <value>true</value>
+    <description>
+      Specifies whether security (user name/admin role) is enabled or not.
+      If disabled, any user can manage the Oozie system and any job.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.PurgeService.older.than</name>
+    <value>30</value>
+    <description>
+      Jobs older than this value, in days, will be purged by the PurgeService.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.PurgeService.purge.interval</name>
+    <value>3600</value>
+    <description>
+      Interval at which the purge service will run, in seconds.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.queue.size</name>
+    <value>1000</value>
+    <description>Max callable queue size</description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.threads</name>
+    <value>10</value>
+    <description>Number of threads used for executing callables</description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.callable.concurrency</name>
+    <value>3</value>
+    <description>
+      Maximum concurrency for a given callable type.
+      Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc.).
+      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+      All commands that use action executors (action-start, action-end, action-kill and action-check) use
+      the action type as the callable type.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.coord.normal.default.timeout</name>
+    <value>120</value>
+    <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
+      -1 means an infinite timeout.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.coord.check.maximum.frequency</name>
+    <value>false</value>
+    <description>
+      When true, Oozie will reject any coordinators with a frequency faster than 5 minutes.  It is not recommended to disable
+      this check or submit coordinators with frequencies faster than 5 minutes: doing so can cause unintended behavior and
+      additional system stress.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.db.schema.name</name>
+    <value>oozie</value>
+    <display-name>Database Name</display-name>
+    <description>
+      Oozie database name.
+    </description>
+    <value-attributes>
+      <type>database</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>oozie.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication used for Oozie HTTP endpoint, the supported values are: simple | kerberos |
+      #AUTHENTICATION_HANDLER_CLASSNAME#.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.WorkflowAppService.system.libpath</name>
+    <value>/user/${user.name}/share/lib</value>
+    <description>
+      System library path to use for workflow applications.
+      This path is added to workflow application if their job properties sets
+      the property 'oozie.use.system.libpath' to true.
+    </description>
+  </property>
+
+  <property>
+    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+    <value>false</value>
+    <description>
+      If set to true, submissions of MapReduce and Pig jobs will automatically
+      include the system library path, thus not requiring users to
+      specify where the Pig JAR files are. Instead, the ones from the system
+      library path are used.
+    </description>
+  </property>
+  <property>
+    <name>oozie.authentication.kerberos.name.rules</name>
+    <value>
+      RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
+      RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
+      RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+      RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+      DEFAULT
+    </value>
+    <description>The mapping from kerberos principal names to local OS user names.</description>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*=/usr/iop/current/hadoop-client/conf</value>
+    <description>
+      Comma-separated AUTHORITY=HADOOP_CONF_DIR pairs, where AUTHORITY is the HOST:PORT
+      of the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up
+      within the Oozie configuration directory, though the path can be absolute (i.e.
+      it can point to Hadoop client conf/ directories on the local filesystem).
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.ActionService.executor.ext.classes</name>
+    <value>
+      org.apache.oozie.action.email.EmailActionExecutor,
+      org.apache.oozie.action.hadoop.HiveActionExecutor,
+      org.apache.oozie.action.hadoop.ShellActionExecutor,
+      org.apache.oozie.action.hadoop.SqoopActionExecutor,
+      org.apache.oozie.action.hadoop.DistcpActionExecutor
+    </value>
+    <description>
+      List of ActionExecutors extension classes (separated by commas). Only action types with associated executors can
+      be used in workflows. This property is a convenience property to add extensions to the built in executors without
+      having to include all the built in ones.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.SchemaService.wf.ext.schemas</name>
+    <value>shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,email-action-0.2.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd</value>
+    <description>
+      Schemas for additional action types. IMPORTANT: if there are no schemas, leave a
+      one-space string; the service trims the value, and if it is empty, Configuration
+      assumes it is NULL.
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.create.db.schema</name>
+    <value>false</value>
+    <description>
+      Creates Oozie DB.
+
+      If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
+      If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+    <display-name>JDBC Driver Class</display-name>
+    <description>
+      JDBC driver class.
+    </description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+    <display-name>Database URL</display-name>
+    <description>
+      JDBC URL.
+    </description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.username</name>
+    <value>oozie</value>
+    <display-name>Database Username</display-name>
+    <description>
+      Database user name to use to connect to the database
+    </description>
+    <value-attributes>
+      <type>db_user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property require-input = "true">
+    <name>oozie.service.JPAService.jdbc.password</name>
+    <value></value>
+    <display-name>Database Password</display-name>
+    <property-type>PASSWORD</property-type>
+    <description>
+      DB user password.
+
+      IMPORTANT: if the password is empty, leave a one-space string; the service trims
+      the value, and if it is empty, Configuration assumes it is NULL.
+    </description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.pool.max.active.conn</name>
+    <value>10</value>
+    <description>
+      Max number of connections.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.services</name>
+    <value>
+      org.apache.oozie.service.SchedulerService,
+      org.apache.oozie.service.InstrumentationService,
+      org.apache.oozie.service.CallableQueueService,
+      org.apache.oozie.service.UUIDService,
+      org.apache.oozie.service.ELService,
+      org.apache.oozie.service.AuthorizationService,
+      org.apache.oozie.service.UserGroupInformationService,
+      org.apache.oozie.service.HadoopAccessorService,
+      org.apache.oozie.service.JobsConcurrencyService,
+      org.apache.oozie.service.URIHandlerService,
+      org.apache.oozie.service.MemoryLocksService,
+      org.apache.oozie.service.DagXLogInfoService,
+      org.apache.oozie.service.SchemaService,
+      org.apache.oozie.service.LiteWorkflowAppService,
+      org.apache.oozie.service.JPAService,
+      org.apache.oozie.service.StoreService,
+      org.apache.oozie.service.CoordinatorStoreService,
+      org.apache.oozie.service.SLAStoreService,
+      org.apache.oozie.service.DBLiteWorkflowStoreService,
+      org.apache.oozie.service.CallbackService,
+      org.apache.oozie.service.ShareLibService,
+      org.apache.oozie.service.ActionService,
+      org.apache.oozie.service.ActionCheckerService,
+      org.apache.oozie.service.RecoveryService,
+      org.apache.oozie.service.PurgeService,
+      org.apache.oozie.service.CoordinatorEngineService,
+      org.apache.oozie.service.BundleEngineService,
+      org.apache.oozie.service.DagEngineService,
+      org.apache.oozie.service.CoordMaterializeTriggerService,
+      org.apache.oozie.service.StatusTransitService,
+      org.apache.oozie.service.PauseTransitService,
+      org.apache.oozie.service.GroupsService,
+      org.apache.oozie.service.ProxyUserService,
+      org.apache.oozie.service.XLogStreamingService,
+      org.apache.oozie.service.JvmPauseMonitorService
+    </value>
+    <description>
+      All services to be created and managed by the Oozie Services singleton.
+      Class names must be separated by commas.
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.URIHandlerService.uri.handlers</name>
+    <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
+    <description>
+      Enlist the different uri handlers supported for data availability checks.
+    </description>
+  </property>
+  <property>
+    <name>oozie.services.ext</name>
+    <value>org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService</value>
+    <description>
+      To add/replace services defined in 'oozie.services' with custom implementations.
+      Class names must be separated by commas.
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.coord.push.check.requeue.interval</name>
+    <value>30000</value>
+    <description>
+      Command re-queue interval for push dependencies (in millisecond).
+    </description>
+  </property>
+  <property>
+    <name>oozie.credentials.credentialclasses</name>
+    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
+    <description>
+      Credential Class to be used for HCat.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.HadoopAccessorService.kerberos.enabled</name>
+    <value>false</value>
+    <description>
+      Indicates if Oozie is configured to use Kerberos.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description>
+      Indicates if anonymous requests are allowed.
+      This setting is meaningful only when using 'simple' authentication.
+    </description>
+  </property>
+
+</configuration>
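
Several values above (oozie.services, oozie.services.ext, the executor
and schema lists) are comma-separated class lists that get trimmed
entry by entry when consumed, which is why surrounding whitespace and
line breaks in the XML are harmless. A minimal sketch of that
consumption pattern:

    raw = """
        org.apache.oozie.service.SchedulerService,
        org.apache.oozie.service.InstrumentationService,
        org.apache.oozie.service.CallableQueueService
    """
    services = [s.strip() for s in raw.split(",") if s.strip()]
    print(len(services))   # 3
    print(services[0])     # org.apache.oozie.service.SchedulerService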

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/kerberos.json
new file mode 100755
index 0000000..dce65d0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/kerberos.json
@@ -0,0 +1,70 @@
+{
+  "services": [
+    {
+      "name": "OOZIE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        },
+        {
+          "name": "/hdfs"
+        }
+      ],
+      "auth_to_local_properties" : [
+        "oozie-site/oozie.authentication.kerberos.name.rules"
+      ],
+      "configurations": [
+        {
+          "oozie-site": {
+            "oozie.authentication.type": "kerberos",
+            "oozie.service.AuthorizationService.authorization.enabled": "true",
+            "oozie.service.HadoopAccessorService.kerberos.enabled": "true",
+            "local.realm": "${realm}",
+            "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials",
+            "oozie.authentication.kerberos.name.rules": ""
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "OOZIE_SERVER",
+          "identities": [
+            {
+              "name": "oozie_server",
+              "principal": {
+                "value": "oozie/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "oozie-site/oozie.service.HadoopAccessorService.kerberos.principal",
+                "local_username" : "${oozie-env/oozie_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/oozie.service.keytab",
+                "owner": {
+                  "name": "${oozie-env/oozie_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "oozie-site/oozie.service.HadoopAccessorService.keytab.file"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "oozie-site/oozie.authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "oozie-site/oozie.authentication.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/metainfo.xml
new file mode 100755
index 0000000..2835c84
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/metainfo.xml
@@ -0,0 +1,176 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <displayName>Oozie</displayName>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.</comment>
+      <version>4.2.0</version>
+      <components>
+        <component>
+          <name>OOZIE_SERVER</name>
+          <displayName>Oozie Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>OOZIE_CLIENT</name>
+          <displayName>Oozie Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>oozie-site.xml</fileName>
+              <dictionaryName>oozie-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>oozie-env.sh</fileName>
+              <dictionaryName>oozie-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>oozie-log4j.properties</fileName>
+              <dictionaryName>oozie-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+       <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zip</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <name>extjs</name>
+            </package>
+            <package>
+              <name>oozie-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>oozie</name>
+            </package>
+            <!--package>
+              <name>falcon</name>
+            </package-->
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>oozie</name>
+            </package>
+            <!--package>
+              <name>falcon</name>
+            </package-->
+            <package>
+              <name>extjs</name>
+            </package>
+            <package>
+              <name>libxml2-utils</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>oozie-site</config-type>
+        <config-type>oozie-env</config-type>
+        <config-type>oozie-log4j</config-type>
+        <config-type>yarn-site</config-type>
+        <config-type>hive-site</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/alerts/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/alerts/alert_check_oozie_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/alerts/alert_check_oozie_server.py
new file mode 100755
index 0000000..7c26775
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/alerts/alert_check_oozie_server.py
@@ -0,0 +1,211 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import os
+import re
+
+from resource_management.core import global_lock
+from resource_management.core.environment import Environment
+from resource_management.core.resources import Execute
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import get_klist_path
+from ambari_commons.os_check import OSConst, OSCheck
+from urlparse import urlparse
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+OOZIE_ENV_HTTPS_RE = r"export\s+OOZIE_HTTPS_PORT=(\d+)"
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+OOZIE_URL_KEY = '{{oozie-site/oozie.base.url}}'
+SECURITY_ENABLED = '{{cluster-env/security_enabled}}'
+OOZIE_USER = '{{oozie-env/oozie_user}}'
+OOZIE_CONF_DIR = '/usr/iop/current/oozie-server/conf'
+OOZIE_CONF_DIR_LEGACY = '/etc/oozie/conf'
+OOZIE_HTTPS_PORT = '{{oozie-site/oozie.https.port}}'
+OOZIE_ENV_CONTENT = '{{oozie-env/content}}'
+
+USER_KEYTAB_KEY = '{{oozie-site/oozie.service.HadoopAccessorService.keytab.file}}'
+USER_PRINCIPAL_KEY = '{{oozie-site/oozie.service.HadoopAccessorService.kerberos.principal}}'
+USER_KEY = '{{oozie-env/oozie_user}}'
+
+# default keytab location
+USER_KEYTAB_SCRIPT_PARAM_KEY = 'default.oozie.keytab'
+USER_KEYTAB_DEFAULT = '/etc/security/keytabs/oozie.headless.keytab'
+
+# default user principal
+USER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.oozie.principal'
+USER_PRINCIPAL_DEFAULT = 'oozie@EXAMPLE.COM'
+
+# default user
+USER_DEFAULT = 'oozie'
+
+class KerberosPropertiesNotFound(Exception): pass
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (OOZIE_URL_KEY, USER_PRINCIPAL_KEY, SECURITY_ENABLED, USER_KEYTAB_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY,
+          USER_KEY, OOZIE_HTTPS_PORT, OOZIE_ENV_CONTENT)
+
+def get_check_command(oozie_url, host_name, configurations, parameters, only_kinit):
+  kerberos_env = None
+
+  user = USER_DEFAULT
+  if USER_KEY in configurations:
+    user = configurations[USER_KEY]
+
+  if is_security_enabled(configurations):
+    # defaults
+    user_keytab = USER_KEYTAB_DEFAULT
+    user_principal = USER_PRINCIPAL_DEFAULT
+
+    # check script params
+    if USER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
+      user_principal = parameters[USER_PRINCIPAL_SCRIPT_PARAM_KEY]
+      user_principal = user_principal.replace('_HOST', host_name.lower())
+    if USER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
+      user_keytab = parameters[USER_KEYTAB_SCRIPT_PARAM_KEY]
+
+    # check configurations last as they should always take precedence
+    if USER_PRINCIPAL_KEY in configurations:
+      user_principal = configurations[USER_PRINCIPAL_KEY]
+      user_principal = user_principal.replace('_HOST', host_name.lower())
+    if USER_KEYTAB_KEY in configurations:
+      user_keytab = configurations[USER_KEYTAB_KEY]
+
+    # Create the kerberos credentials cache (ccache) file and set it in the environment to use
+    # when executing curl
+    env = Environment.get_instance()
+    ccache_file = "{0}{1}oozie_alert_cc_{2}".format(env.tmp_dir, os.sep, os.getpid())
+    kerberos_env = {'KRB5CCNAME': ccache_file}
+
+    # Get the configured Kerberos executable search paths, if any
+    kerberos_executable_search_paths = None
+    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+
+    klist_path_local = get_klist_path(kerberos_executable_search_paths)
+    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+    kinit_part_command = format("{kinit_path_local} -l 5m20s -c {ccache_file} -kt {user_keytab} {user_principal}; ")
+
+    # Determine if we need to kinit by testing to see if the relevant cache exists and has
+    # non-expired tickets.  Tickets are marked to expire after 5 minutes to help reduce the number
+    # of kinits we do but recover quickly when keytabs are regenerated
+
+    if only_kinit:
+      kinit_command = kinit_part_command
+    else:
+      kinit_command = "{0} -s {1} || ".format(klist_path_local, ccache_file) + kinit_part_command
+
+    # prevent concurrent kinit
+    kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
+    kinit_lock.acquire()
+    try:
+      Execute(kinit_command, environment=kerberos_env, user=user)
+    finally:
+      kinit_lock.release()
+
+  oozie_config_directory = OOZIE_CONF_DIR_LEGACY
+  if os.path.exists(OOZIE_CONF_DIR):
+    oozie_config_directory = OOZIE_CONF_DIR
+
+  command = "source {0}/oozie-env.sh ; oozie admin -oozie {1} -status".format(
+    oozie_config_directory, oozie_url)
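+  # Illustrative example of the assembled command (directory and URL are assumptions):
+  #   source /etc/oozie/conf/oozie-env.sh ; oozie admin -oozie http://oozie-host:11000/oozie -status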
+
+  return (command, kerberos_env, user)
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  if OOZIE_URL_KEY not in configurations:
+    return (RESULT_CODE_UNKNOWN, ['The Oozie URL is a required parameter.'])
+
+  https_port = None
+  # try to get the https port from the oozie-env content
+  if OOZIE_ENV_CONTENT in configurations:
+    for line in configurations[OOZIE_ENV_CONTENT].splitlines():
+      result = re.match(OOZIE_ENV_HTTPS_RE, line)
+
+      if result is not None:
+        https_port = result.group(1)
+  # or from oozie-site.xml
+  if https_port is None and OOZIE_HTTPS_PORT in configurations:
+    https_port = configurations[OOZIE_HTTPS_PORT]
+
+  oozie_url = configurations[OOZIE_URL_KEY]
+
+  # construct proper url for https
+  if https_port is not None:
+    parsed_url = urlparse(oozie_url)
+    oozie_url = oozie_url.replace(parsed_url.scheme, "https")
+    if parsed_url.port is None:
+      oozie_url = oozie_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
+    else:
+      oozie_url = oozie_url.replace(str(parsed_url.port), str(https_port))
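+  # Illustrative result of the https rewrite above (host and ports are assumptions):
+  #   http://oozie-host:11000/oozie  ->  https://oozie-host:11443/oozie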
+
+  # https will not work with a localhost address; we need to use the FQDN
+  if https_port is None:
+    oozie_url = oozie_url.replace(urlparse(oozie_url).hostname, host_name)
+
+  (code, msg) = get_check_result(oozie_url, host_name, configurations, parameters, False)
+
+  # Sometimes the real ticket lifetime is less than what we set (5m20s as of now),
+  # so double-check with a re-kinit to be sure the failure is not a ticket-lifetime problem.
+  if is_security_enabled(configurations) and code == RESULT_CODE_CRITICAL:
+    (code, msg) = get_check_result(oozie_url, host_name, configurations, parameters, True)
+
+  return (code, msg)
+
+
+def get_check_result(oozie_url, host_name, configurations, parameters, only_kinit):
+  try:
+    command, env, user = get_check_command(oozie_url, host_name, configurations, parameters, only_kinit)
+    # execute the command
+    Execute(command, environment=env, user=user)
+
+    return (RESULT_CODE_OK, ["Successful connection to {0}".format(oozie_url)])
+  except KerberosPropertiesNotFound, ex:
+    return (RESULT_CODE_UNKNOWN, [str(ex)])
+  except Exception, ex:
+    return (RESULT_CODE_CRITICAL, [str(ex)])
+
+def is_security_enabled(configurations):
+  security_enabled = False
+  if SECURITY_ENABLED in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED]).upper() == 'TRUE'
+
+  return security_enabled

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/files/oozieSmoke2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/files/oozieSmoke2.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/files/oozieSmoke2.sh
new file mode 100755
index 0000000..e0d29b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/files/oozieSmoke2.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export os_family=$1
+export oozie_lib_dir=$2
+export oozie_conf_dir=$3
+export oozie_bin_dir=$4
+export oozie_server_url=$5
+export oozie_examples_dir=$6
+export hadoop_conf_dir=$7
+export hadoop_bin_dir=$8
+export smoke_test_user=$9
+export security_enabled=${10}
+export smoke_user_keytab=${11}
+export kinit_path_local=${12}
+export smokeuser_principal=${13}
+
+function getValueFromField {
+  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
+  return $?
+}
+
+function checkOozieJobStatus {
+  local job_id=$1
+  local num_of_tries=$2
+  #default num_of_tries to 10 if not present
+  num_of_tries=${num_of_tries:-10}
+  local i=0
+  local rc=1
+  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
+  /var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"
+  while [ $i -lt $num_of_tries ] ; do
+    cmd_output=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+    (IFS='';echo $cmd_output)
+    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
+    echo "workflow_status=$act_status"
+    if [ "RUNNING" == "$act_status" ]; then
+      #increment the counter and get the status again after waiting for 15 secs
+      sleep 15
+      (( i++ ))
+      elif [ "SUCCEEDED" == "$act_status" ]; then
+        rc=0;
+        break;
+      else
+        rc=1
+        break;
+      fi
+    done
+    return $rc
+}
+
+export OOZIE_EXIT_CODE=0
+export OOZIE_SERVER=$oozie_server_url
+
+cd $oozie_examples_dir
+
+if [[ $security_enabled == "True" ]]; then
+  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smokeuser_principal}; "
+else
+  kinitcmd=""
+fi
+
+cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $oozie_examples_dir/examples/apps/map-reduce/job.properties  -run"
+echo $cmd
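+# The submit command prints a line like "job: 0000000-170101000000000-oozie-oozi-W" (illustrative
+# workflow ID); that ID is parsed out of the output below.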
+job_info=`/var/lib/ambari-agent/ambari-sudo.sh su ${smoke_test_user} -s /bin/bash - -c "$cmd" | grep "job:"`
+job_id="`echo $job_info | cut -d':' -f2`"
+checkOozieJobStatus "$job_id" 15
+OOZIE_EXIT_CODE="$?"
+exit $OOZIE_EXIT_CODE

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/files/prepareOozieHdfsDirectories.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/files/prepareOozieHdfsDirectories.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/files/prepareOozieHdfsDirectories.sh
new file mode 100755
index 0000000..79a1bfc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/files/prepareOozieHdfsDirectories.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export oozie_conf_dir=$1
+export oozie_examples_dir=$2
+export hadoop_conf_dir=$3
+
+function getValueFromField {
+  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
+  return $?
+}
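+# Illustrative usage (host and port are assumptions):
+#   getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address  ->  rm-host:8050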
+
+export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address`
+export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.defaultFS`
+
+cd $oozie_examples_dir
+
+/var/lib/ambari-agent/ambari-sudo.sh tar -zxf oozie-examples.tar.gz
+/var/lib/ambari-agent/ambari-sudo.sh chmod -R o+rx examples
+
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+/var/lib/ambari-agent/ambari-sudo.sh sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/files/wrap_ooziedb.sh
new file mode 100755
index 0000000..bdd43ca
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/files/wrap_ooziedb.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
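+# Wrapper around ooziedb.sh: if it fails only because the DB schema already exists,
+# exit 0 so repeated schema-create calls stay idempotent.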
+OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
+EC=$?
+echo $OUT
+GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
+if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
+then
+  exit 0
+else
+  exit $EC
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie.py
new file mode 100755
index 0000000..6cd2b71
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie.py
@@ -0,0 +1,279 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import hashlib
+import os
+
+from resource_management.core.resources import Directory
+from resource_management.core.resources import File
+from resource_management.core.resources.system import Execute
+from resource_management.core.source import DownloadSource
+from resource_management.core.source import InlineTemplate
+from resource_management.core.source import Template
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import compare_versions
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.resources.packaging import Package
+from resource_management.core.shell import as_user
+
+
+# TODO: see if we can remove this
+def oozie(is_server=False):
+  import params
+
+  if is_server:
+    params.HdfsResource(params.oozie_hdfs_user_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.oozie_user,
+                         mode=params.oozie_hdfs_user_mode
+    )
+    params.HdfsResource(None, action="execute")
+
+  Directory(params.conf_dir,
+             create_parents = True,
+             owner = params.oozie_user,
+             group = params.user_group
+  )
+  XmlConfig("oozie-site.xml",
+    conf_dir = params.conf_dir,
+    configurations = params.oozie_site,
+    configuration_attributes=params.config['configuration_attributes']['oozie-site'],
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0660
+  )
+  File(format("{conf_dir}/oozie-env.sh"),
+    owner=params.oozie_user,
+    content=InlineTemplate(params.oozie_env_sh_template),
+    group=params.user_group,
+  )
+
+  if (params.log4j_props != None):
+    File(format("{params.conf_dir}/oozie-log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.oozie_user,
+      content=params.log4j_props
+    )
+  elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
+    File(format("{params.conf_dir}/oozie-log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.oozie_user
+    )
+
+  File(format("{params.conf_dir}/adminusers.txt"),
+    mode=0644,
+    group=params.user_group,
+    owner=params.oozie_user,
+    content=Template('adminusers.txt.j2', oozie_user=params.oozie_user)
+  )
+
+  environment = {
+    "no_proxy": format("{ambari_server_hostname}")
+  }
+
+  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
+     params.jdbc_driver_name == "org.postgresql.Driver" or \
+     params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
+      content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
+    )
+  pass
+
+  oozie_ownership()
+
+  if is_server:
+    oozie_server_specific()
+
+def download_database_library_if_needed():
+  """
+  Downloads the library to use when connecting to the Oozie database, if
+  necessary. The library will be downloaded to 'params.target' unless
+  otherwise specified.
+  :param target_directory: the location where the database library will be
+  downloaded to.
+  :return:
+  """
+  import params
+  # check to see if the JDBC driver name is in the list of ones that need to
+  # be downloaded
+  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or \
+     params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
+    File(params.downloaded_custom_connector,
+         content = DownloadSource(params.driver_curl_source),
+    )
+    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.target),
+            #creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
+            path=["/bin", "/usr/bin/"],
+            sudo = True)
+
+    File ( params.target,
+      owner = params.oozie_user,
+      group = params.user_group
+    )
+
+
+def oozie_ownership():
+  import params
+
+  File ( format("{conf_dir}/hadoop-config.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/oozie-default.xml"),
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode  = 0644
+  )
+
+  Directory ( format("{conf_dir}/action-conf"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/action-conf/hive.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+def oozie_server_specific():
+  import params
+
+  no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
+  File(params.pid_file,
+    action="delete",
+    not_if=no_op_test
+  )
+
+  oozie_server_directories = [format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir]
+  Directory( oozie_server_directories,
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0755,
+    create_parents = True,
+    cd_access="a",
+  )
+
+  Directory(params.oozie_libext_dir,
+            create_parents=True,
+  )
+
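+  # Note: the MD5 below is computed over the tarball *path string*, not the file contents,
+  # so the skip check mainly detects a missing share directory or a changed/missing hash file.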
+  hashcode_file = format("{oozie_home}/.hashcode")
+  hashcode = hashlib.md5(format('{oozie_home}/oozie-sharelib.tar.gz')).hexdigest()
+  skip_recreate_sharelib = format("test -f {hashcode_file} && test -d {oozie_home}/share && [[ `cat {hashcode_file}` == '{hashcode}' ]]")
+
+  untar_sharelib = ('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home)
+
+  Execute( untar_sharelib,    # time-expensive
+    not_if  = format("{no_op_test} || {skip_recreate_sharelib}"),
+    sudo = True,
+  )
+  configure_cmds = []
+  #configure_cmds.append(('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home))
+  #configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir))
+  #configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
+  configure_cmds.append(('chown', '-RL', format('{oozie_user}:{user_group}'), params.oozie_webapps_conf_dir))
+
+
+  Execute( configure_cmds,
+    not_if  = no_op_test,
+    sudo = True,
+  )
+
+  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or \
+     params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
+    File(params.downloaded_custom_connector,
+         content = DownloadSource(params.driver_curl_source),
+    )
+
+
+    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, params.target),
+            #creates=params.target, TODO: uncomment after ranger_hive_plugin will not provide jdbc
+            path=["/bin", "/usr/bin/"],
+            sudo = True)
+
+    File ( params.target,
+      owner = params.oozie_user,
+      group = params.user_group
+    )
+
+  #falcon el extension
+  if params.has_falcon_host:
+    Execute(format('rm -rf {oozie_libext_dir}/falcon-oozie-el-extension.jar'),)
+    if params.security_enabled:
+      Execute(format('/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab {hdfs_principal_name}'))
+    Execute(format('hadoop fs -get /user/falcon/temp/falcon-oozie-el-extension.jar {oozie_libext_dir}'),
+      not_if  = no_op_test,
+    )
+    Execute(format('{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension.jar'),
+      not_if  = no_op_test,
+    )
+  #if params.lzo_enabled and len(params.lzo_packages_for_current_host) > 0:
+  #  Package(params.lzo_packages_for_current_host)
+  #  Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
+  #    not_if  = no_op_test,
+  #  )
+
+  prepare_war_cmd_file = format("{oozie_home}/.prepare_war_cmd")
+  prepare_war_cmd = format("cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war {oozie_secure}")
+  skip_prepare_war_cmd = format("test -f {prepare_war_cmd_file} && [[ `cat {prepare_war_cmd_file}` == '{prepare_war_cmd}' ]]")
+
+  Execute(prepare_war_cmd,    # time-expensive
+    user = params.oozie_user,
+    not_if  = format("{no_op_test} || {skip_recreate_sharelib} && {skip_prepare_war_cmd}")
+  )
+  File(hashcode_file,
+       content = hashcode,
+       mode = 0644,
+  )
+  File(prepare_war_cmd_file,
+       content = prepare_war_cmd,
+       mode = 0644,
+  )
+
+  # Create hive-site and tez-site configs for oozie
+  Directory(params.hive_conf_dir,
+        create_parents = True,
+        owner = params.oozie_user,
+        group = params.user_group
+  )
+  if 'hive-site' in params.config['configurations']:
+      XmlConfig("hive-site.xml",
+        conf_dir=params.hive_conf_dir,
+        configurations=params.config['configurations']['hive-site'],
+        configuration_attributes=params.config['configuration_attributes']['hive-site'],
+        owner=params.oozie_user,
+        group=params.user_group,
+        mode=0640
+  )
+  '''if 'tez-site' in params.config['configurations']:
+      XmlConfig( "tez-site.xml",
+        conf_dir = params.hive_conf_dir,
+        configurations = params.config['configurations']['tez-site'],
+        configuration_attributes=params.config['configuration_attributes']['tez-site'],
+        owner = params.oozie_user,
+        group = params.user_group,
+        mode = 0664
+  )'''
+  Execute(('chown', '-R', format("{oozie_user}:{user_group}"), params.oozie_server_dir),
+          sudo=True
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_client.py
new file mode 100755
index 0000000..b36d2ec
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_client.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+
+class OozieClient(Script):
+
+  def get_component_name(self):
+    return "oozie-client"
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=False)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    # this function should not execute if the version can't be determined or
+    # is not at least IOP 4.0.0.0
+    if not params.version or compare_versions(format_stack_version(params.version), '4.0.0.0') < 0:
+      return
+
+    Logger.info("Executing Oozie Client Rolling Upgrade pre-restart")
+    conf_select.select(params.stack_name, "oozie", params.version)
+    stack_select.select("oozie-client", params.version)
+    #Execute(format("stack-select set oozie-client {version}"))
+
+  # We substitute some configs (oozie.authentication.kerberos.principal) before generation (see oozie.py and params.py).
+  # This function returns changed configs (it's used for config generation before config download)
+  def generate_configs_get_xml_file_content(self, filename, dictionary):
+    if dictionary == 'oozie-site':
+      import params
+      config = self.get_config()
+      return {'configurations': params.oozie_site,
+              'configuration_attributes': config['configuration_attributes'][dictionary]}
+    else:
+      return super(OozieClient, self).generate_configs_get_xml_file_content(filename, dictionary)
+
+if __name__ == "__main__":
+  OozieClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server.py
new file mode 100755
index 0000000..d6d6d37
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/package/scripts/oozie_server.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import oozie_server_upgrade
+
+from resource_management.core import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import check_process_status
+from resource_management.libraries.functions import compare_versions
+from resource_management.libraries.functions import format_stack_version
+from resource_management.libraries.functions.security_commons import build_expectations
+from resource_management.libraries.functions.security_commons import cached_kinit_executor
+from resource_management.libraries.functions.security_commons import get_params_from_filesystem
+from resource_management.libraries.functions.security_commons import validate_security_config_properties
+from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+
+class OozieServer(Script):
+
+  def get_component_name(self):
+    return "oozie-server"
+
+  def install(self, env):
+    self.install_packages(env)
+
+
+  def configure(self, env, upgrade_type=None):
+    import params
+
+    #TODO: needed?
+    if upgrade_type == "nonrolling" and params.upgrade_direction == Direction.UPGRADE and \
+            params.version and compare_versions(format_stack_version(params.version), '4.1.0.0') >= 0:
+      conf_select.select(params.stack_name, "oozie", params.version)
+      # In order for "/usr/hdp/current/oozie-<client/server>" to point to the new version of
+      # oozie, we need to create the symlinks for both server and client.
+      # This is required as both need to point to the newly installed oozie version.
+
+      # Sets the symlink : eg: /usr/hdp/current/oozie-client -> /usr/hdp/2.3.x.y-<version>/oozie
+      stack_select.select("oozie-client", params.version)
+      # Sets the symlink : eg: /usr/hdp/current/oozie-server -> /usr/hdp/2.3.x.y-<version>/oozie
+      stack_select.select("oozie-server", params.version)
+
+    env.set_params(params)
+    oozie(is_server=True)
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    #TODO remove this when config command will be implemented
+    self.configure(env)
+
+    # preparing the WAR file must run after configure since configure writes out
+    # oozie-env.sh which is needed to have the right environment directories setup!
+    if upgrade_type is not None:
+      oozie_server_upgrade.prepare_warfile()
+
+    oozie_service(action='start', upgrade_type=upgrade_type)
+
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    oozie_service(action='stop', upgrade_type=upgrade_type)
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_file)
+
+
+  def security_status(self, env):
+
+    import status_params
+    env.set_params(status_params)
+
+    if status_params.security_enabled:
+      expectations = {
+        "oozie-site":
+          build_expectations('oozie-site',
+                             {
+                               "oozie.authentication.type": "kerberos",
+                               "oozie.service.AuthorizationService.security.enabled": "true",
+                               "oozie.service.HadoopAccessorService.kerberos.enabled": "true"
+                             },
+                             [
+                               "local.realm",
+                               "oozie.authentication.kerberos.principal",
+                               "oozie.authentication.kerberos.keytab",
+                               "oozie.service.HadoopAccessorService.kerberos.principal",
+                               "oozie.service.HadoopAccessorService.keytab.file"
+                             ],
+                             None)
+      }
+
+      security_params = get_params_from_filesystem(status_params.conf_dir,
+                                                   {'oozie-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ('oozie-site' not in security_params
+              or 'oozie.authentication.kerberos.principal' not in security_params['oozie-site']
+              or 'oozie.authentication.kerberos.keytab' not in security_params['oozie-site']
+              or 'oozie.service.HadoopAccessorService.kerberos.principal' not in security_params['oozie-site']
+              or 'oozie.service.HadoopAccessorService.keytab.file' not in security_params['oozie-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.oozie_user,
+                                security_params['oozie-site']['oozie.authentication.kerberos.keytab'],
+                                security_params['oozie-site']['oozie.authentication.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.oozie_user,
+                                security_params['oozie-site']['oozie.service.HadoopAccessorService.keytab.file'],
+                                security_params['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    """
+    Performs the tasks surrounding the Oozie startup when a rolling upgrade
+    is in progress. This includes backing up the configuration, updating
+    the database, preparing the WAR, and installing the sharelib in HDFS.
+    :param env:
+    :return:
+    """
+    import params
+    env.set_params(params)
+
+    # this function should not execute if the version can't be determined or
+    # is not at least IOP 4.0.0.0
+    if not params.version or compare_versions(format_stack_version(params.version), '4.0.0.0') < 0:
+      return
+
+    Logger.info("Executing Oozie Server Rolling Upgrade pre-restart")
+
+    oozie_server_upgrade.backup_configuration()
+
+    conf_select.select(params.stack_name, "oozie", params.version)
+    stack_select.select("oozie-server", params.version)
+    #Execute(format("stack-select set oozie-server {version}"))
+
+    oozie_server_upgrade.restore_configuration()
+    #oozie_server_upgrade.prepare_libext_directory()
+    oozie_server_upgrade.upgrade_oozie()
+
+
+if __name__ == "__main__":
+  OozieServer().execute()


[35/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/zkfc_slave.py
new file mode 100755
index 0000000..29ad4d9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/zkfc_slave.py
@@ -0,0 +1,148 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+import utils  # this is needed to avoid a circular dependency since utils.py calls this class
+from hdfs import hdfs
+
+
+class ZkfcSlave(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env, upgrade_type=False):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    Directory(params.hadoop_pid_dir_prefix,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+    # format the znode for this HA setup
+    # only run this format command if the active namenode hostname is set
+    # The Ambari UI HA Wizard prompts the user to run this command
+    # manually, so this guarantees it is only run in the Blueprints case
+    if params.dfs_ha_enabled and \
+       params.dfs_ha_namenode_active is not None:
+      success =  initialize_ha_zookeeper(params)
+      if not success:
+        raise Fail("Could not initialize HA state in zookeeper")
+
+    utils.service(
+      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    utils.service(
+      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def configure(self, env):
+    hdfs()
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.zkfc_pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+    hdfs_expectations = {}
+    hdfs_expectations.update(core_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'core-site.xml': FILE_TYPE_XML})
+    result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      if not result_issues:  # If all validations passed successfully
+        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
+          try:
+            cached_kinit_executor(status_params.kinit_path_local,
+                                  status_params.hdfs_user,
+                                  status_params.hdfs_user_keytab,
+                                  status_params.hdfs_user_principal,
+                                  status_params.hostname,
+                                  status_params.tmp_dir)
+            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+          except Exception as e:
+            self.put_structured_out({"securityState": "ERROR"})
+            self.put_structured_out({"securityStateErrorInfo": str(e)})
+        else:
+          self.put_structured_out(
+            {"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
+          self.put_structured_out({"securityState": "UNSECURED"})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+def initialize_ha_zookeeper(params):
+  try:
+    iterations = 10
+    formatZK_cmd = "hdfs zkfc -formatZK -nonInteractive"
+    Logger.info("Initialize HA state in ZooKeeper: %s" % (formatZK_cmd))
+    for i in range(iterations):
+      Logger.info('Try %d out of %d' % (i+1, iterations))
+      code, out = shell.call(formatZK_cmd, logoutput=False, user=params.hdfs_user)
+      if code == 0:
+        Logger.info("HA state initialized in ZooKeeper successfully")
+        return True
+      elif code == 2:
+        Logger.info("HA state already initialized in ZooKeeper")
+        return True
+      else:
+        Logger.warning('HA state initialization in ZooKeeper failed with error code %d. Will retry' % (code))
+  except Exception as ex:
+    Logger.error('HA state initialization in ZooKeeper threw an exception. Reason %s' %(str(ex)))
+  return False
+
+if __name__ == "__main__":
+  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/templates/exclude_hosts_list.j2
new file mode 100755
index 0000000..a92cdc1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file
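
The template simply emits one line per entry of hdfs_exclude_file, producing the exclude file the NameNode consults when decommissioning DataNodes. A minimal rendering sketch (standalone jinja2, made-up hostnames; Ambari itself renders this through its own Template resources) looks like:

    from jinja2 import Template

    EXCLUDE_TEMPLATE = "{% for host in hdfs_exclude_file %}\n{{host}}\n{% endfor %}"

    print(Template(EXCLUDE_TEMPLATE, trim_blocks=True).render(
        hdfs_exclude_file=["dn1.example.com", "dn2.example.com"]))
    # dn1.example.com
    # dn2.example.com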

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/templates/hdfs.conf.j2
new file mode 100755
index 0000000..d58a6f5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/templates/hdfs.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hdfs_user}}   - nofile 32768
+{{hdfs_user}}   - nproc  65536
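
Rendered with a typical hdfs_user of hdfs (an assumed value; it comes from hadoop-env, not this file), the fragment above yields the usual limits.conf entries that raise the open-file and process caps for the HDFS daemons:

    hdfs   - nofile 32768
    hdfs   - nproc  65536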

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/templates/slaves.j2
new file mode 100755
index 0000000..4a9e713
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/templates/slaves.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/widgets.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/widgets.json
new file mode 100755
index 0000000..7e93a6e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/widgets.json
@@ -0,0 +1,428 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_hdfs_dashboard",
+      "display_name": "Standard HDFS Dashboard",
+      "section_name": "HDFS_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "NameNode GC count",
+          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcCount",
+              "metric_path": "metrics/jvm/gcCount",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
+              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC total count",
+              "value": "${jvm.JvmMetrics.GcCount}"
+            },
+            {
+              "name": "GC count of type major collection",
+              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode GC time",
+          "description": "Total time taken by major type garbage collections in milliseconds.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
+              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC time in major collection",
+              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NN Connection Load",
+          "description": "Number of open RPC connections being managed by NameNode.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.NumOpenConnections",
+              "metric_path": "metrics/rpc/NumOpenConnections",
+              "category": "",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Open Connections",
+              "value": "${rpc.rpc.NumOpenConnections}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Heap",
+          "description": "Heap memory committed and Heap memory used with respect to time.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "JVM heap committed",
+              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
+            },
+            {
+              "name": "JVM heap used",
+              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Host Load",
+          "description": "Percentage of CPU and Memory resources being consumed on NameNode host.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system",
+              "metric_path": "metrics/cpu/cpu_system",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_user",
+              "metric_path": "metrics/cpu/cpu_user",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_nice",
+              "metric_path": "metrics/cpu/cpu_nice",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_idle",
+              "metric_path": "metrics/cpu/cpu_idle",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
+            },
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total - mem_free - mem_cached)/mem_total) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        },
+        {
+          "widget_name": "NameNode RPC",
+          "description": "Compares the average time spent for RPC request in a queue and RPC request being processed.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/RpcQueueTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/RpcProcessingTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "RPC Queue Wait time",
+              "value": "${rpc.rpc.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "RPC Processing time",
+              "value": "${rpc.rpc.RpcProcessingTimeAvgTime}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "ms"
+          }
+        },
+        {
+          "widget_name": "Corrupted Blocks",
+          "description": "Number represents data blocks that have become corrupted or missing. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Corrupted Blocks",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "Under Replicated Blocks",
+          "description": "Number represents file blocks that does not meet the replication factor criteria. Its indicative of HDFS bad health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Under Replicated Blocks",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_hdfs_heatmap",
+      "section_name": "HDFS_HEATMAPS",
+      "display_name": "HDFS Heatmaps",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "HDFS Bytes Read",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead",
+              "metric_path": "metrics/dfs/datanode/bytes_read",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Read",
+              "value": "${dfs.datanode.BytesRead}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "HDFS Bytes Written",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesWritten",
+              "metric_path": "metrics/dfs/datanode/bytes_written",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Written",
+              "value": "${dfs.datanode.BytesWritten}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "DataNode Garbage Collection Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Garbage Collection Time",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Used",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Committed",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Committed",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        }
+      ]
+    }
+  ]
+}
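
The only non-trivial expressions in this dashboard are the "NameNode Host Load" ones, which derive CPU and memory utilization percentages from the raw ganglia counters. A small sketch of that arithmetic (the sample numbers are invented) is:

    def cpu_utilization(cpu_system, cpu_user, cpu_nice, cpu_idle, cpu_wio):
        # ((system + user + nice) / (system + user + nice + idle + wio)) * 100
        busy = cpu_system + cpu_user + cpu_nice
        return busy / (busy + cpu_idle + cpu_wio) * 100.0

    def memory_utilization(mem_total, mem_free, mem_cached):
        # ((total - free - cached) / total) * 100
        return (mem_total - mem_free - mem_cached) / mem_total * 100.0

    print(round(cpu_utilization(5.0, 20.0, 0.0, 70.0, 5.0), 1))    # 25.0
    print(round(memory_utilization(16384.0, 4096.0, 2048.0), 1))   # 62.5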

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/alerts.json
new file mode 100755
index 0000000..9ca12a6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/alerts.json
@@ -0,0 +1,111 @@
+{
+  "HIVE": {
+    "service": [],
+    "HIVE_METASTORE": [
+      {
+        "name": "hive_metastore_process",
+        "label": "Hive Metastore Process",
+        "description": "This host-level alert is triggered if the Hive Metastore process cannot be determined to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HIVE/1.2.1.4.1/package/alerts/alert_hive_metastore.py",
+          "parameters": [
+            {
+              "name": "default.smoke.user",
+              "display_name": "Default Smoke User",
+              "value": "ambari-qa",
+              "type": "STRING",
+              "description": "The user that will run the Hive commands if not specified in cluster-env/smokeuser"
+            },
+            {
+              "name": "default.smoke.principal",
+              "display_name": "Default Smoke Principal",
+              "value": "ambari-qa@EXAMPLE.COM",
+              "type": "STRING",
+              "description": "The principal to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_principal_name"
+            },
+            {
+              "name": "default.smoke.keytab",
+              "display_name": "Default Smoke Keytab",
+              "value": "/etc/security/keytabs/smokeuser.headless.keytab",
+              "type": "STRING",
+              "description": "The keytab to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_keytab"
+            }
+          ]
+        }
+      }
+    ],
+    "HIVE_SERVER": [
+      {
+        "name": "hive_server_process",
+        "label": "HiveServer2 Process",
+        "description": "This host-level alert is triggered if the HiveServer cannot be determined to be up and responding to client requests.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HIVE/1.2.1.4.1/package/alerts/alert_hive_thrift_port.py",
+          "parameters": [
+            {
+              "name": "default.smoke.user",
+              "display_name": "Default Smoke User",
+              "value": "ambari-qa",
+              "type": "STRING",
+              "description": "The user that will run the Hive commands if not specified in cluster-env/smokeuser"
+            },
+            {
+              "name": "default.smoke.principal",
+              "display_name": "Default Smoke Principal",
+              "value": "ambari-qa@EXAMPLE.COM",
+              "type": "STRING",
+              "description": "The principal to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_principal_name"
+            },
+            {
+              "name": "default.smoke.keytab",
+              "display_name": "Default Smoke Keytab",
+              "value": "/etc/security/keytabs/smokeuser.headless.keytab",
+              "type": "STRING",
+              "description": "The keytab to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_keytab"
+            }
+          ]
+        }
+      }
+    ],
+    "WEBHCAT_SERVER": [
+      {
+        "name": "hive_webhcat_server_status",
+        "label": "WebHCat Server Status",
+        "description": "This host-level alert is triggered if the templeton server status is not healthy.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HIVE/1.2.1.4.1/package/alerts/alert_webhcat_server.py",
+          "parameters": [
+            {
+              "name": "default.smoke.user",
+              "display_name": "Default Smoke User",
+              "value": "ambari-qa",
+              "type": "STRING",
+              "description": "The user that will run the Hive commands if not specified in cluster-env/smokeuser"
+            },
+            {
+              "name": "connection.timeout",
+              "display_name": "Connection Timeout",
+              "value": 5.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before this alert is considered to be CRITICAL",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
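
All three alerts are SCRIPT-type, so the parameters above (smoke user, principal, keytab, connection timeout) are handed to the referenced Python scripts at runtime. Below is a skeletal sketch of the contract such scripts follow; the body is a placeholder rather than the shipped alert_hive_metastore.py, and the helper functions Ambari actually provides are not shown.

    RESULT_OK = 'OK'
    RESULT_CRITICAL = 'CRITICAL'

    def get_tokens():
        # configuration keys the alert wants resolved before execute() is called
        return ('{{cluster-env/smokeuser}}', '{{cluster-env/smokeuser_keytab}}')

    def execute(configurations={}, parameters={}, host_name=None):
        # fall back to the defaults declared in alerts.json when the cluster
        # configuration does not provide a value
        smokeuser = configurations.get('{{cluster-env/smokeuser}}',
                                       parameters.get('default.smoke.user', 'ambari-qa'))
        # ... probe the Metastore / HiveServer2 / WebHCat endpoint here ...
        return (RESULT_OK, ['Probe ran as user {0}'.format(smokeuser)])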

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hcat-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hcat-env.xml
new file mode 100755
index 0000000..df3f949
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hcat-env.xml
@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- hcat-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hcat-env.sh file</description>
+    <value>
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements. See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership. The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License. You may obtain a copy of the License at
+      #
+      # http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing, software
+      # distributed under the License is distributed on an "AS IS" BASIS,
+      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      # See the License for the specific language governing permissions and
+      # limitations under the License.
+
+      JAVA_HOME={{java64_home}}
+      HCAT_PID_DIR={{hcat_pid_dir}}/
+      HCAT_LOG_DIR={{hcat_log_dir}}/
+      HCAT_CONF_DIR={{hcat_conf_dir}}
+      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+      #DBROOT is the path where the connector jars are downloaded
+      DBROOT={{hcat_dbroot}}
+      USER={{hcat_user}}
+      METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml
new file mode 100755
index 0000000..1f8e64b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-env.xml
@@ -0,0 +1,195 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hive.heapsize</name>
+    <value>1024</value>
+    <description>Hive Java heap size</description>
+  </property>
+
+  <property>
+    <name>hive.client.heapsize</name>
+    <value>512</value>
+    <description>Hive Client Java heap size</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.heapsize</name>
+    <value>1024</value>
+    <description>Hive Metastore Java heap size</description>
+  </property>
+
+  <property>
+    <name>hive_database_type</name>
+    <value>mysql</value>
+    <description>Default HIVE DB type.</description>
+  </property>
+  <property>
+    <name>hive_database</name>
+    <display-name>Hive Database</display-name>
+    <value>New MySQL Database</value>
+    <description>
+      Property that determines whether the HIVE DB is managed by Ambari.
+    </description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive_ambari_database</name>
+    <display-name>Database Type</display-name>
+    <value>MySQL</value>
+    <description>Database type.</description>
+  </property>
+  <property>
+    <name>hive_database_name</name>
+    <value>hive</value>
+    <description>Database name.</description>
+  </property>
+  <property>
+    <name>hive_dbroot</name>
+    <value>/usr/lib/hive/lib/</value>
+    <description>Hive DB Directory.</description>
+  </property>
+  <property>
+    <name>hive_log_dir</name>
+    <display-name>Hive Log Dir</display-name>
+    <value>/var/log/hive</value>
+    <description>Directory for Hive Log files.</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive_pid_dir</name>
+    <display-name>Hive PID Dir</display-name>
+    <value>/var/run/hive</value>
+    <description>Hive PID Dir.</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive_user</name>
+    <value>hive</value>
+    <property-type>USER</property-type>
+    <description>Hive User.</description>
+  </property>
+
+  <!--HCAT-->
+
+  <property>
+    <name>hcat_log_dir</name>
+    <display-name>WebHCat Log Dir</display-name>
+    <value>/var/log/webhcat</value>
+    <description>WebHCat Log Dir.</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hcat_pid_dir</name>
+    <display-name>WebHCat Pid Dir</display-name>
+    <value>/var/run/webhcat</value>
+    <description>WebHCat Pid Dir.</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hcat_user</name>
+    <value>hcat</value>
+    <property-type>USER</property-type>
+    <description>HCat User.</description>
+  </property>
+  <property>
+    <name>webhcat_user</name>
+    <value>hcat</value>
+    <property-type>USER</property-type>
+    <description>WebHCat User.</description>
+  </property>
+
+  <!-- hive-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hive-env.sh file</description>
+    <value>
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the JVM started by the hive shell script can be controlled via:
+
+if [ "$SERVICE" = "metastore" ]; then
+  export HADOOP_HEAPSIZE="{{hive_metastore_heapsize}}"
+else
+  export HADOOP_HEAPSIZE="{{hive_heapsize}}"
+fi
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# A larger heap size may be required when running queries over a large number of files or partitions.
+# By default the hive shell scripts use a heap size of 256 MB.  A larger heap size would also be
+# appropriate for the hive server (hwi etc.).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_config_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+elif [ -d "{{hcat_lib}}" ]; then
+  export HIVE_AUX_JARS_PATH={{hcat_lib}}
+fi
+
+# Set HIVE_AUX_JARS_PATH
+export HIVE_AUX_JARS_PATH={{hbase_lib}}/hbase-client.jar,\
+{{hbase_lib}}/hbase-common.jar,\
+{{hbase_lib}}/hbase-hadoop2-compat.jar,\
+{{hbase_lib}}/hbase-prefix-tree.jar,\
+{{hbase_lib}}/hbase-protocol.jar,\
+{{hbase_lib}}/hbase-server.jar,\
+{{hbase_lib}}/htrace-core-2.04.jar,\
+{{hbase_lib}}/high-scale-lib-1.1.1.jar,\
+${HIVE_AUX_JARS_PATH}
+
+export METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-exec-log4j.xml
new file mode 100755
index 0000000..3e17d2d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-exec-log4j.xml
@@ -0,0 +1,118 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom hive-exec-log4j</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+
+hive.log.threshold=ALL
+hive.root.logger=INFO,RFA
+hive.log.dir=${java.io.tmpdir}/${user.name}
+hive.query.id=hadoop
+hive.log.file=${hive.query.id}.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hive.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=${hive.log.threshold}
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hive.log.dir}/${hive.log.file}
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.append=true
+log4j.appender.RFA.bufferedIO=false
+log4j.appender.RFA.maxBackupIndex=5
+log4j.appender.RFA.maxFileSize=1GB
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t] %p %c{2}: %m%n
+
+#custom logging levels
+#log4j.logger.xxx=DEBUG
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
+
+
+log4j.category.DataNucleus=ERROR,RFA
+log4j.category.Datastore=ERROR,RFA
+log4j.category.Datastore.Schema=ERROR,RFA
+log4j.category.JPOX.Datastore=ERROR,RFA
+log4j.category.JPOX.Plugin=ERROR,RFA
+log4j.category.JPOX.MetaData=ERROR,RFA
+log4j.category.JPOX.Query=ERROR,RFA
+log4j.category.JPOX.General=ERROR,RFA
+log4j.category.JPOX.Enhancer=ERROR,RFA
+
+
+# Silence useless ZK logs
+log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,RFA
+log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,RFA
+
+# Disable deprecated parameter warning messages in hive
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-log4j.xml
new file mode 100755
index 0000000..f7f789b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-log4j.xml
@@ -0,0 +1,136 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hive.log.threshold=ALL
+hive.root.logger=INFO,RFA
+hive.log.dir=${java.io.tmpdir}/${user.name}
+hive.log.file=hive.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hive.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=${hive.log.threshold}
+
+#
+# Rolling File Appender
+#
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hive.log.dir}/${hive.log.file}
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+log4j.appender.RFA.append=true
+log4j.appender.RFA.bufferedIO=false
+log4j.appender.RFA.maxBackupIndex=5
+log4j.appender.RFA.maxFileSize=1GB
+
+#
+# Daily Rolling File Appender
+#
+# Use the PidDailyRollingFileAppender class instead if you want to use separate log files
+# for different CLI sessions.
+#
+# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+
+log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
+log4j.appender.console.encoding=UTF-8
+
+#custom logging levels
+#log4j.logger.xxx=DEBUG
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
+
+
+log4j.category.DataNucleus=ERROR,RFA
+log4j.category.Datastore=ERROR,RFA
+log4j.category.Datastore.Schema=ERROR,RFA
+log4j.category.JPOX.Datastore=ERROR,RFA
+log4j.category.JPOX.Plugin=ERROR,RFA
+log4j.category.JPOX.MetaData=ERROR,RFA
+log4j.category.JPOX.Query=ERROR,RFA
+log4j.category.JPOX.General=ERROR,RFA
+log4j.category.JPOX.Enhancer=ERROR,RFA
+
+
+# Silence useless ZK logs
+log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,RFA
+log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,RFA
+
+# Disable deprecated parameter warning messages in hive
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+    </value>
+  </property>
+
+</configuration>


[42/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/metrics.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/metrics.json
new file mode 100755
index 0000000..a309ec7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HBASE/metrics.json
@@ -0,0 +1,9410 @@
+{
+  "HBASE_REGIONSERVER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_bps":{
+              "metric":"read_bps",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_bps":{
+              "metric":"write_bps",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "regionserver.Server.mutationsWithoutWALSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "regionserver.Server.slowAppendCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "regionserver.Server.blockCacheCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_min": {
+              "metric": "regionserver.Server.Delete_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "regionserver.RegionServer.authorizationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "regionserver.RegionServer.QueueCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/writeRequestsCount": {
+              "metric": "regionserver.Server.writeRequestCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_min": {
+              "metric": "regionserver.Server.Get_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Mutate_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitCount": {
+              "metric": "regionserver.Server.blockCacheHitCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "regionserver.Server.slowPutCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/regionServerStartup_avg_time": {
+              "metric": "regionserver.Server.regionServerStartTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "regionserver.Server.blockCacheSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_median": {
+              "metric": "regionserver.Server.Mutate_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/readRequestsCount": {
+              "metric": "regionserver.Server.readRequestCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_min": {
+              "metric": "regionserver.Server.Mutate_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/storefileIndexSizeMB": {
+              "metric": "regionserver.Server.storeFileIndexSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_median": {
+              "metric": "regionserver.Server.Delete_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Get_num_ops": {
+              "metric": "regionserver.Server.Get_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/ScanNext_num_ops": {
+              "metric": "regionserver.Server.ScanNext_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Append_num_ops": {
+              "metric": "regionserver.Server.Append_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Delete_num_ops": {
+              "metric": "regionserver.Server.Delete_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Mutate_num_ops": {
+              "metric": "regionserver.Server.Mutate_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Increment_num_ops": {
+              "metric": "regionserver.Server.Increment_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Get_95th_percentile": {
+              "metric": "regionserver.Server.Get_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/ScanNext_95th_percentile": {
+              "metric": "regionserver.Server.ScanNext_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Mutate_95th_percentile": {
+              "metric": "regionserver.Server.Mutate_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Increment_95th_percentile": {
+              "metric": "regionserver.Server.Increment_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Append_95th_percentile": {
+              "metric": "regionserver.Server.Append_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/Delete_95th_percentile": {
+              "metric": "regionserver.Server.Delete_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/percentFilesLocal": {
+              "metric": "regionserver.Server.percentFilesLocal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/Server/updatesBlockedTime": {
+              "metric": "regionserver.Server.updatesBlockedTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/IPC/numOpenConnections": {
+              "metric": "regionserver.RegionServer.numOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/IPC/numActiveHandler": {
+              "metric": "regionserver.RegionServer.numActiveHandler",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/ipc/IPC/numCallsInGeneralQueue": {
+              "metric": "regionserver.RegionServer.numCallsInGeneralQueue",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_mean": {
+              "metric": "regionserver.Server.Mutate_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+           "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "regionserver.RegionServer.authenticationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitRatio": {
+              "metric": "regionserver.Server.blockCacheCountHitPercent",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitPercent": {
+              "metric": "regionserver.Server.blockCountHitPercent",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheEvictedCount": {
+              "metric": "regionserver.Server.blockCacheEvictionCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+              "metric": "regionserver.Server.Mutate_99th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_max": {
+              "metric": "regionserver.Server.Get_max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+              "metric": "regionserver.Server.Get_99th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+              "metric": "regionserver.Server.Delete_99th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "regionserver.Server.slowIncrementCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/compactionQueueSize": {
+              "metric": "regionserver.Server.compactionQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushTime_num_ops": {
+              "metric": "regionserver.Server.FlushTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Get_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/stores": {
+              "metric": "regionserver.Server.storeCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "regionserver.RegionServer.authenticationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "regionserver.RegionServer.receivedBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_max": {
+              "metric": "regionserver.Server.Mutate_max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_mean": {
+              "metric": "regionserver.Server.Get_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/regions": {
+              "metric": "regionserver.Server.regionCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheFree": {
+              "metric": "regionserver.Server.blockCacheFreeSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "regionserver.Server.blockCacheMissCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushQueueSize": {
+              "metric": "regionserver.Server.flushQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "regionserver.RegionServer.QueueCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+              "metric": "regionserver.Server.Delete_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+              "metric": "regionserver.Server.staticIndexSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+              "metric": "regionserver.Server.mutationsWithoutWALCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_median": {
+              "metric": "regionserver.Server.Get_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_speed": {
+              "metric": "cpu_speed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_max": {
+              "metric": "regionserver.Server.Delete_max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "regionserver.RegionServer.sentBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/maxMemoryM": {
+              "metric": "jvm.metrics.maxMemoryM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushTime_avg_time": {
+              "metric": "regionserver.Server.FlushTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowGetCount": {
+              "metric": "regionserver.Server.slowGetCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/requests": {
+              "metric": "regionserver.Server.totalRequestCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/storefiles": {
+              "metric": "regionserver.Server.storeFileCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowDeleteCount": {
+              "metric": "regionserver.Server.slowDeleteCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/hlogFileCount": {
+              "metric": "regionserver.Server.hlogFileCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+              "metric": "regionserver.Server.Delete_95th_percentile",
+              "unit": "ms",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/memstoreSize": {
+              "metric": "regionserver.Server.memStoreSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Delete_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "regionserver.RegionServer.authorizationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+              "metric": "regionserver.Server.staticBloomSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/increment_avg_time": {
+              "metric": "regionserver.Server.Increment_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountConcurrentMarkSweep": {
+              "metric": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountParNew": {
+              "metric": "jvm.JvmMetrics.GcCountParNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisConcurrentMarkSweep": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisParNew": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisParNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/numCallsInPriorityQueue": {
+              "metric": "regionserver.RegionServer.numCallsInPriorityQueue",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/numCallsInReplicationQueue": {
+              "metric": "regionserver.RegionServer.numCallsInReplicationQueue",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/ProcessCallTime_75th_percentile": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/ProcessCallTime_95th_percentile": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/ProcessCallTime_99th_percentile": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/ProcessCallTime_max": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/ProcessCallTime_mean": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/ProcessCallTime_min": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/QueueCallTime_75th_percentile": {
+              "metric": "regionserver.RegionServer.QueueCallTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/QueueCallTime_95th_percentile": {
+              "metric": "regionserver.RegionServer.QueueCallTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/QueueCallTime_99th_percentile": {
+              "metric": "regionserver.RegionServer.QueueCallTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/QueueCallTime_max": {
+              "metric": "regionserver.RegionServer.QueueCallTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/QueueCallTime_mean": {
+              "metric": "regionserver.RegionServer.QueueCallTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/QueueCallTime_min": {
+              "metric": "regionserver.RegionServer.QueueCallTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/queueSize": {
+              "metric": "regionserver.RegionServer.queueSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_75th_percentile": {
+              "metric": "regionserver.RegionServer.TotalCallTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_95th_percentile": {
+              "metric": "regionserver.RegionServer.TotalCallTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_99th_percentile": {
+              "metric": "regionserver.RegionServer.TotalCallTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_max": {
+              "metric": "regionserver.RegionServer.TotalCallTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_mean": {
+              "metric": "regionserver.RegionServer.TotalCallTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_median": {
+              "metric": "regionserver.RegionServer.TotalCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_min": {
+              "metric": "regionserver.RegionServer.TotalCallTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/RegionServer/TotalCallTime_num_ops": {
+              "metric": "regionserver.RegionServer.TotalCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Replication/sink/ageOfLastAppliedOp": {
+              "metric": "regionserver.Replication.sink.ageOfLastAppliedOp",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Replication/sink/appliedBatches": {
+              "metric": "regionserver.Replication.sink.appliedBatches",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Replication/sink/appliedOps": {
+              "metric": "regionserver.Replication.sink.appliedOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Append_75th_percentile": {
+              "metric": "regionserver.Server.Append_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Append_99th_percentile": {
+              "metric": "regionserver.Server.Append_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Append_max": {
+              "metric": "regionserver.Server.Append_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Append_mean": {
+              "metric": "regionserver.Server.Append_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Append_median": {
+              "metric": "regionserver.Server.Append_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Append_min": {
+              "metric": "regionserver.Server.Append_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/blockCacheExpressHitPercent": {
+              "metric": "regionserver.Server.blockCacheExpressHitPercent",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/blockedRequestCount": {
+              "metric": "regionserver.Server.blockedRequestCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/checkMutateFailedCount": {
+              "metric": "regionserver.Server.checkMutateFailedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/checkMutatePassedCount": {
+              "metric": "regionserver.Server.checkMutatePassedCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/compactedCellsCount": {
+              "metric": "regionserver.Server.compactedCellsCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/compactedCellsSize": {
+              "metric": "regionserver.Server.compactedCellsSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/flushedCellsCount": {
+              "metric": "regionserver.Server.flushedCellsCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/flushedCellsSize": {
+              "metric": "regionserver.Server.flushedCellsSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/FlushTime_75th_percentile": {
+              "metric": "regionserver.Server.FlushTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/FlushTime_95th_percentile": {
+              "metric": "regionserver.Server.FlushTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/FlushTime_99th_percentile": {
+              "metric": "regionserver.Server.FlushTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/FlushTime_max": {
+              "metric": "regionserver.Server.FlushTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/FlushTime_mean": {
+              "metric": "regionserver.Server.FlushTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/FlushTime_min": {
+              "metric": "regionserver.Server.FlushTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/hlogFileSize": {
+              "metric": "regionserver.Server.hlogFileSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Increment_75th_percentile": {
+              "metric": "regionserver.Server.Increment_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Increment_99th_percentile": {
+              "metric": "regionserver.Server.Increment_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Increment_max": {
+              "metric": "regionserver.Server.Increment_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Increment_mean": {
+              "metric": "regionserver.Server.Increment_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Increment_min": {
+              "metric": "regionserver.Server.Increment_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/majorCompactedCellsCount": {
+              "metric": "regionserver.Server.majorCompactedCellsCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/majorCompactedCellsSize": {
+              "metric": "regionserver.Server.majorCompactedCellsSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/percentFilesLocalSecondaryRegions": {
+              "metric": "regionserver.Server.percentFilesLocalSecondaryRegions",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_75th_percentile": {
+              "metric": "regionserver.Server.Replay_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_95th_percentile": {
+              "metric": "regionserver.Server.Replay_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_99th_percentile": {
+              "metric": "regionserver.Server.Replay_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_max": {
+              "metric": "regionserver.Server.Replay_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_mean": {
+              "metric": "regionserver.Server.Replay_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_median": {
+              "metric": "regionserver.Server.Replay_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_min": {
+              "metric": "regionserver.Server.Replay_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/Replay_num_ops": {
+              "metric": "regionserver.Server.Replay_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/ScanNext_75th_percentile": {
+              "metric": "regionserver.Server.ScanNext_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/ScanNext_99th_percentile": {
+              "metric": "regionserver.Server.ScanNext_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/ScanNext_max": {
+              "metric": "regionserver.Server.ScanNext_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/ScanNext_mean": {
+              "metric": "regionserver.Server.ScanNext_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/ScanNext_median": {
+              "metric": "regionserver.Server.ScanNext_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/ScanNext_min": {
+              "metric": "regionserver.Server.ScanNext_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/splitQueueLength": {
+              "metric": "regionserver.Server.splitQueueLength",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/splitRequestCount": {
+              "metric": "regionserver.Server.splitRequestCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/splitSuccessCount": {
+              "metric": "regionserver.Server.splitSuccessCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_75th_percentile": {
+              "metric": "regionserver.Server.SplitTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_95th_percentile": {
+              "metric": "regionserver.Server.SplitTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_99th_percentile": {
+              "metric": "regionserver.Server.SplitTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_max": {
+              "metric": "regionserver.Server.SplitTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_mean": {
+              "metric": "regionserver.Server.SplitTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_median": {
+              "metric": "regionserver.Server.SplitTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_min": {
+              "metric": "regionserver.Server.SplitTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/SplitTime_num_ops": {
+              "metric": "regionserver.Server.SplitTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/Server/storeFileSize": {
+              "metric": "regionserver.Server.storeFileSize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/appendCount": {
+              "metric": "regionserver.WAL.appendCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_75th_percentile": {
+              "metric": "regionserver.WAL.AppendSize_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_95th_percentile": {
+              "metric": "regionserver.WAL.AppendSize_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_99th_percentile": {
+              "metric": "regionserver.WAL.AppendSize_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_max": {
+              "metric": "regionserver.WAL.AppendSize_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_mean": {
+              "metric": "regionserver.WAL.AppendSize_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_median": {
+              "metric": "regionserver.WAL.AppendSize_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_min": {
+              "metric": "regionserver.WAL.AppendSize_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendSize_num_ops": {
+              "metric": "regionserver.WAL.AppendSize_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_75th_percentile": {
+              "metric": "regionserver.WAL.AppendTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_95th_percentile": {
+              "metric": "regionserver.WAL.AppendTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_99th_percentile": {
+              "metric": "regionserver.WAL.AppendTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_max": {
+              "metric": "regionserver.WAL.AppendTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_mean": {
+              "metric": "regionserver.WAL.AppendTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_median": {
+              "metric": "regionserver.WAL.AppendTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_min": {
+              "metric": "regionserver.WAL.AppendTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/AppendTime_num_ops": {
+              "metric": "regionserver.WAL.AppendTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/lowReplicaRollRequest": {
+              "metric": "regionserver.WAL.lowReplicaRollRequest",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/rollRequest": {
+              "metric": "regionserver.WAL.rollRequest",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/slowAppendCount": {
+              "metric": "regionserver.WAL.slowAppendCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_75th_percentile": {
+              "metric": "regionserver.WAL.SyncTime_75th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_95th_percentile": {
+              "metric": "regionserver.WAL.SyncTime_95th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_99th_percentile": {
+              "metric": "regionserver.WAL.SyncTime_99th_percentile",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_max": {
+              "metric": "regionserver.WAL.SyncTime_max",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_mean": {
+              "metric": "regionserver.WAL.SyncTime_mean",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_median": {
+              "metric": "regionserver.WAL.SyncTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_min": {
+              "metric": "regionserver.WAL.SyncTime_min",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/regionserver/WAL/SyncTime_num_ops": {
+              "metric": "regionserver.WAL.SyncTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/percentFilesLocal": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheFree": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/flushQueueSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/ScanNext_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.ScanNext_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Increment_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Increment_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Append_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Append_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/ScanNext_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.ScanNext_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Append_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Append_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/Increment_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Increment_95th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/updatesBlockedTime": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.updatesBlockedTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numActiveHandler": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numActiveHandler",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numCallsInGeneralQueue": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numCallsInGeneralQueue",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/IPC/numOpenConnections": {
+              "metric": "Hadoop:service=HBase,name=IPC,sub=IPC.numOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheEvictedCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/compactionQueueSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowGetCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/readRequestsCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/storefileIndexSizeMB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/requests": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/storefiles": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/writeRequestsCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_median": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/slowDeleteCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/stores": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_min": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
+              "unit": "ms",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/memstoreSize": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_mean": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_max": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/regions": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/blockCacheHitCount": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+              "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle":{
+              "metric":"cpu_idle",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_nice":{
+              "metric":"cpu_nice",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_system":{
+              "metric":"cpu_system",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_user":{
+              "metric":"cpu_user",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/cpu/cpu_wio":{
+              "metric":"cpu_wio",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_free":{
+              "metric":"disk_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/disk_total":{
+              "metric":"disk_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_bps":{
+              "metric":"read_bps",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_bps":{
+              "metric":"write_bps",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_fifteen":{
+              "metric":"load_fifteen",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_five":{
+              "metric":"load_five",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/load/load_one":{
+              "metric":"load_one",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_buffers":{
+              "metric":"mem_buffers",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_cached":{
+              "metric":"mem_cached",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_free":{
+              "metric":"mem_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_shared":{
+              "metric":"mem_shared",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/mem_total":{
+              "metric":"mem_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_free":{
+              "metric":"swap_free",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/memory/swap_total":{
+              "metric":"swap_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_in":{
+              "metric":"bytes_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/bytes_out":{
+              "metric":"bytes_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_in":{
+              "metric":"pkts_in",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/network/pkts_out":{
+              "metric":"pkts_out",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_run":{
+              "metric":"proc_run",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/process/proc_total":{
+              "metric":"proc_total",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_count":{
+              "metric":"read_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_count":{
+              "metric":"write_count",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_bytes":{
+              "metric":"read_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_bytes":{
+              "metric":"write_bytes",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/read_time":{
+              "metric":"read_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_time":{
+              "metric":"write_time",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+              "metric": "regionserver.Server.mutationsWithoutWALSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+              "metric": "regionserver.Server.Delete_99th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowAppendCount": {
+              "metric": "regionserver.Server.slowAppendCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowIncrementCount": {
+              "metric": "regionserver.Server.slowIncrementCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+              "metric": "regionserver.Server.Mutate_95th_percentile",
+              "unit": "ms",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/compactionQueueSize": {
+              "metric": "regionserver.Server.compactionQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/disk/part_max_used": {
+              "metric": "part_max_used",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushTime_num_ops": {
+              "metric": "regionserver.Server.FlushTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheCount": {
+              "metric": "regionserver.Server.blockCacheCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Get_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/stores": {
+              "metric": "regionserver.Server.storeCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "regionserver.RegionServer.authenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_min": {
+              "metric": "regionserver.Server.Delete_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "regionserver.RegionServer.authorizationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "regionserver.RegionServer.QueueCallTime_median",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+              "metric": "regionserver.Server.Get_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "regionserver.RegionServer.receivedBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "regionserver.RegionServer.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_max": {
+              "metric": "regionserver.Server.Mutate_max",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+              "metric": "regionserver.Server.Delete_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/boottime": {
+              "metric": "boottime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/writeRequestsCount": {
+              "metric": "regionserver.Server.writeRequestCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_min": {
+              "metric": "regionserver.Server.Get_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "regionserver.RegionServer.ProcessCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_mean": {
+              "metric": "regionserver.Server.Get_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+              "metric": "regionserver.Server.Mutate_75th_percentile",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/regions": {
+              "metric": "regionserver.Server.regionCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheHitCount": {
+              "metric": "regionserver.Server.blockCacheHitCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/slowPutCount": {
+              "metric": "regionserver.Server.slowPutCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheFree": {
+              "metric": "regionserver.Server.blockCacheFreeSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheMissCount": {
+              "metric": "regionserver.Server.blockCacheMissCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/flushQueueSize": {
+              "metric": "regionserver.Server.flushQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/blockCacheSize": {
+              "metric": "regionserver.Server.blockCacheSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+              "metric": "regionserver.Server.Mutate_num_ops",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "regionserver.RegionServer.QueueCallTime_num_ops",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_median": {
+              "metric": "regionserver.Server.Mutate_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+              "metric": "regionserver.Server.Delete_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_aidle": {
+              "metric": "cpu_aidle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/readRequestsCount": {
+              "metric": "regionserver.Server.readRequestCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_min": {
+              "metric": "regionserver.Server.Mutate_min",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/storefileIndexSizeMB": {
+              "metric": "regionserver.Server.storeFileIndexSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_median": {
+              "metric": "regionserver.Server.Delete_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+              "metric": "regionserver.Server.staticIndexSize",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/putRequestLatency_mean": {
+              "metric": "regionserver.Server.Mutate_mean",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+              "metric": "regionserver.Server.mutationsWithoutWALCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/getRequestLatency_median": {
+              "metric": "regionserver.Server.Get_median",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/hbase/regionserver/deleteRequestLatency_max": {
+              "metric": "regionserver.Server.Delete_max",
+              "pointInTime": false,
+              "temporal": true
+           

<TRUNCATED>

[07/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py
new file mode 100755
index 0000000..1f0ae18
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/stack-advisor/stack_advisor_25.py
@@ -0,0 +1,1940 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import math
+import traceback
+
+from ambari_commons.str_utils import string_set_equals
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.get_bare_principal import get_bare_principal
+
+try:
+  from stack_advisor_24 import *
+except ImportError:
+  #Ignore ImportError
+  print("stack_advisor_24 not found")
+
+class HDP25StackAdvisor(HDP24StackAdvisor):
+
+  def __init__(self):
+    super(HDP25StackAdvisor, self).__init__()
+    Logger.initialize_logger()
+    self.HIVE_INTERACTIVE_SITE = 'hive-interactive-site'
+    self.YARN_ROOT_DEFAULT_QUEUE_NAME = 'default'
+    self.AMBARI_MANAGED_LLAP_QUEUE_NAME = 'llap'
+    self.RANGER_TAGSYNC_SITE = 'ranger-tagsync-site'
+
+  def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP25StackAdvisor,self).recommendOozieConfigurations(configurations, clusterData, services, hosts)
+    putOozieEnvProperty = self.putProperty(configurations, "oozie-env", services)
+
+    if not "oozie-env" in services["configurations"] :
+      Logger.info("No oozie configurations available")
+      return
+
+    if not "FALCON_SERVER" in clusterData["components"] :
+      Logger.info("Falcon is not part of the installation")
+      return
+
+    falconUser = 'falcon'
+
+    if "falcon-env" in services["configurations"] :
+      if "falcon_user" in services["configurations"]["falcon-env"]["properties"] :
+        falconUser = services["configurations"]["falcon-env"]["properties"]["falcon_user"]
+        Logger.info("Falcon user from configuration: %s " % falconUser)
+
+    Logger.info("Falcon user : %s" % falconUser)
+
+    oozieUser = 'oozie'
+
+    if "oozie_user" \
+      in services["configurations"]["oozie-env"]["properties"] :
+      oozieUser = services["configurations"]["oozie-env"]["properties"]["oozie_user"]
+      Logger.info("Oozie user from configuration %s" % oozieUser)
+
+    Logger.info("Oozie user %s" % oozieUser)
+
+    if "oozie_admin_users" \
+            in services["configurations"]["oozie-env"]["properties"] :
+      currentAdminUsers =  services["configurations"]["oozie-env"]["properties"]["oozie_admin_users"]
+      Logger.info("Oozie admin users from configuration %s" % currentAdminUsers)
+    else :
+      currentAdminUsers = "{0}, oozie-admin".format(oozieUser)
+      Logger.info("Setting default oozie admin users to %s" % currentAdminUsers)
+
+
+    if falconUser in currentAdminUsers :
+      Logger.info("Falcon user %s already member of  oozie admin users " % falconUser)
+      return
+
+    newAdminUsers = "{0},{1}".format(currentAdminUsers, falconUser)
+
+    Logger.info("new oozie admin users : %s" % newAdminUsers)
+
+    services["forced-configurations"].append({"type" : "oozie-env", "name" : "oozie_admin_users"})
+    putOozieEnvProperty("oozie_admin_users", newAdminUsers)
+
+  def createComponentLayoutRecommendations(self, services, hosts):
+    parentComponentLayoutRecommendations = super(HDP25StackAdvisor, self).createComponentLayoutRecommendations(
+      services, hosts)
+    return parentComponentLayoutRecommendations
+
+  def getComponentLayoutValidations(self, services, hosts):
+    parentItems = super(HDP25StackAdvisor, self).getComponentLayoutValidations(services, hosts)
+    childItems = []
+    if self.HIVE_INTERACTIVE_SITE in services['configurations']:
+      hsi_hosts = self.__getHostsForComponent(services, "HIVE", "HIVE_SERVER_INTERACTIVE")
+      if len(hsi_hosts) > 1:
+        message = "Only one host can install HIVE_SERVER_INTERACTIVE. "
+        childItems.append(
+          {"type": 'host-component', "level": 'ERROR', "message": message, "component-name": 'HIVE_SERVER_INTERACTIVE'})
+
+    parentItems.extend(childItems)
+    return parentItems
+
+  def getServiceConfigurationValidators(self):
+    parentValidators = super(HDP25StackAdvisor, self).getServiceConfigurationValidators()
+    childValidators = {
+      "ATLAS": {"application-properties": self.validateAtlasConfigurations},
+      "HIVE": {"hive-interactive-env": self.validateHiveInteractiveEnvConfigurations,
+               "hive-interactive-site": self.validateHiveInteractiveSiteConfigurations,
+               "hive-env": self.validateHiveConfigurationsEnv},
+      "YARN": {"yarn-site": self.validateYARNConfigurations},
+      "RANGER": {"ranger-tagsync-site": self.validateRangerTagsyncConfigurations},
+      "SPARK2": {"spark2-defaults": self.validateSpark2Defaults,
+                 "spark2-thrift-sparkconf": self.validateSpark2ThriftSparkConf}
+    }
+    self.mergeValidators(parentValidators, childValidators)
+    return parentValidators
+
+  def validateAtlasConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    application_properties = getSiteProperties(configurations, "application-properties")
+    validationItems = []
+
+    #<editor-fold desc="LDAP and AD">
+    auth_type = application_properties['atlas.authentication.method.ldap.type']
+    Logger.info("Validating Atlas configs, authentication type: %s" % str(auth_type))
+
+    # Required props
+    ldap_props = {"atlas.authentication.method.ldap.url": "",
+                  "atlas.authentication.method.ldap.userDNpattern": "uid=",
+                  "atlas.authentication.method.ldap.groupSearchBase": "",
+                  "atlas.authentication.method.ldap.groupSearchFilter": "",
+                  "atlas.authentication.method.ldap.groupRoleAttribute": "cn",
+                  "atlas.authentication.method.ldap.base.dn": "",
+                  "atlas.authentication.method.ldap.bind.dn": "",
+                  "atlas.authentication.method.ldap.bind.password": "",
+                  "atlas.authentication.method.ldap.user.searchfilter": ""
+    }
+    ad_props = {"atlas.authentication.method.ldap.ad.domain": "",
+                "atlas.authentication.method.ldap.ad.url": "",
+                "atlas.authentication.method.ldap.ad.base.dn": "",
+                "atlas.authentication.method.ldap.ad.bind.dn": "",
+                "atlas.authentication.method.ldap.ad.bind.password": "",
+                "atlas.authentication.method.ldap.ad.user.searchfilter": "(sAMAccountName={0})"
+    }
+
+    props_to_require = set()
+    if auth_type.lower() == "ldap":
+      props_to_require = set(ldap_props.keys())
+    elif auth_type.lower() == "ad":
+      props_to_require = set(ad_props.keys())
+    elif auth_type.lower() == "none":
+      pass
+
+    for prop in props_to_require:
+      if prop not in application_properties or application_properties[prop] is None or application_properties[prop].strip() == "":
+        validationItems.append({"config-name": prop,
+                                "item": self.getErrorItem("If authentication type is %s, this property is required." % auth_type)})
+    #</editor-fold>
+
+    if application_properties['atlas.graph.index.search.backend'] == 'solr5' and \
+            not application_properties['atlas.graph.index.search.solr.zookeeper-url']:
+      validationItems.append({"config-name": "atlas.graph.index.search.solr.zookeeper-url",
+                              "item": self.getErrorItem(
+                                  "If AMBARI_INFRA is not installed then the SOLR zookeeper url configuration must be specified.")})
+
+    if not application_properties['atlas.kafka.bootstrap.servers']:
+      validationItems.append({"config-name": "atlas.kafka.bootstrap.servers",
+                              "item": self.getErrorItem(
+                                  "If KAFKA is not installed then the Kafka bootstrap servers configuration must be specified.")})
+
+    if not application_properties['atlas.kafka.zookeeper.connect']:
+      validationItems.append({"config-name": "atlas.kafka.zookeeper.connect",
+                              "item": self.getErrorItem(
+                                  "If KAFKA is not installed then the Kafka zookeeper quorum configuration must be specified.")})
+
+    if application_properties['atlas.graph.storage.backend'] == 'hbase' and 'hbase-site' in services['configurations']:
+      hbase_zookeeper_quorum = services['configurations']['hbase-site']['properties']['hbase.zookeeper.quorum']
+
+      if not application_properties['atlas.graph.storage.hostname']:
+        validationItems.append({"config-name": "atlas.graph.storage.hostname",
+                                "item": self.getErrorItem(
+                                    "If HBASE is not installed then the hbase zookeeper quorum configuration must be specified.")})
+      elif string_set_equals(application_properties['atlas.graph.storage.hostname'], hbase_zookeeper_quorum):
+        validationItems.append({"config-name": "atlas.graph.storage.hostname",
+                                "item": self.getWarnItem(
+                                    "Atlas is configured to use the HBase installed in this cluster. If you would like Atlas to use another HBase instance, please configure this property and HBASE_CONF_DIR variable in atlas-env appropriately.")})
+
+      if not application_properties['atlas.audit.hbase.zookeeper.quorum']:
+        validationItems.append({"config-name": "atlas.audit.hbase.zookeeper.quorum",
+                                "item": self.getErrorItem(
+                                    "If HBASE is not installed then the audit hbase zookeeper quorum configuration must be specified.")})
+
+    elif application_properties['atlas.graph.storage.backend'] == 'hbase' and 'hbase-site' not in services[
+      'configurations']:
+      if not application_properties['atlas.graph.storage.hostname']:
+        validationItems.append({"config-name": "atlas.graph.storage.hostname",
+                                "item": self.getErrorItem(
+                                  "Atlas is not configured to use the HBase installed in this cluster. If you would like Atlas to use another HBase instance, please configure this property and HBASE_CONF_DIR variable in atlas-env appropriately.")})
+      if not application_properties['atlas.audit.hbase.zookeeper.quorum']:
+        validationItems.append({"config-name": "atlas.audit.hbase.zookeeper.quorum",
+                                "item": self.getErrorItem(
+                                  "If HBASE is not installed then the audit hbase zookeeper quorum configuration must be specified.")})
+
+    validationProblems = self.toConfigurationValidationProblems(validationItems, "application-properties")
+    return validationProblems
+
+  def validateSpark2Defaults(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [
+      {
+        "config-name": 'spark.yarn.queue',
+        "item": self.validatorYarnQueue(properties, recommendedDefaults, 'spark.yarn.queue', services)
+      }
+    ]
+    return self.toConfigurationValidationProblems(validationItems, "spark2-defaults")
+
+  def validateSpark2ThriftSparkConf(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = [
+      {
+        "config-name": 'spark.yarn.queue',
+        "item": self.validatorYarnQueue(properties, recommendedDefaults, 'spark.yarn.queue', services)
+      }
+    ]
+    return self.toConfigurationValidationProblems(validationItems, "spark2-thrift-sparkconf")
+
+  def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    parentValidationProblems = super(HDP25StackAdvisor, self).validateYARNConfigurations(properties, recommendedDefaults, configurations, services, hosts)
+    yarn_site_properties = getSiteProperties(configurations, "yarn-site")
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item["StackServiceComponents"] for sublist in componentsListList for item in sublist]
+    validationItems = []
+    if self.HIVE_INTERACTIVE_SITE in services['configurations']:
+      hsi_hosts = self.__getHostsForComponent(services, "HIVE", "HIVE_SERVER_INTERACTIVE")
+      if len(hsi_hosts) > 0:
+        # HIVE_SERVER_INTERACTIVE is mapped to a host
+        if 'yarn.resourcemanager.work-preserving-recovery.enabled' not in yarn_site_properties or \
+                'true' != yarn_site_properties['yarn.resourcemanager.work-preserving-recovery.enabled']:
+          validationItems.append({"config-name": "yarn.resourcemanager.work-preserving-recovery.enabled",
+                                      "item": self.getWarnItem(
+                                        "While enabling HIVE_SERVER_INTERACTIVE it is recommended that you enable work preserving restart in YARN.")})
+
+    validationProblems = self.toConfigurationValidationProblems(validationItems, "yarn-site")
+    validationProblems.extend(parentValidationProblems)
+    return validationProblems
+
+  """
+  Performs the following validation checks for HIVE_SERVER_INTERACTIVE's hive-interactive-site configs.
+      1. Queue selected in 'hive.llap.daemon.queue.name' config should be sized >= the minimum required to run the LLAP
+         and Hive2 app.
+      2. Queue selected in 'hive.llap.daemon.queue.name' config should not be in the 'STOPPED' state.
+      3. 'hive.server2.enable.doAs' config should be set to 'false' for Hive2.
+      4. 'Maximum Total Concurrent Queries' (hive.server2.tez.sessions.per.default.queue) should not consume more than 50% of the queue selected for LLAP.
+      5. If the 'llap' queue is selected, the 'remaining available capacity' in the cluster should be at least 512 MB in order to run Service Checks.
+  """
+  def validateHiveInteractiveSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    hsi_hosts = self.__getHostsForComponent(services, "HIVE", "HIVE_SERVER_INTERACTIVE")
+    curr_selected_queue_for_llap = None
+    curr_selected_queue_for_llap_cap_perc =  None
+    MIN_ASSUMED_CAP_REQUIRED_FOR_SERVICE_CHECKS = 512
+    current_selected_queue_for_llap_cap =  None
+    if self.HIVE_INTERACTIVE_SITE in services['configurations']:
+      if len(hsi_hosts) > 0:
+        # Get total cluster capacity
+        node_manager_host_list = self.get_node_manager_hosts(services, hosts)
+        node_manager_cnt = len(node_manager_host_list)
+        yarn_nm_mem_in_mb = self.get_yarn_nm_mem_in_mb(services, configurations)
+        total_cluster_capacity = node_manager_cnt * yarn_nm_mem_in_mb
+
+        capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services)
+        if capacity_scheduler_properties:
+          if self.HIVE_INTERACTIVE_SITE in services['configurations'] and \
+              'hive.llap.daemon.queue.name' in services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']:
+            curr_selected_queue_for_llap = services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']['hive.llap.daemon.queue.name']
+            if curr_selected_queue_for_llap:
+              current_selected_queue_for_llap_cap = self.__getSelectedQueueTotalCap(capacity_scheduler_properties,
+                                                                                  curr_selected_queue_for_llap, total_cluster_capacity)
+              if current_selected_queue_for_llap_cap:
+                curr_selected_queue_for_llap_cap_perc = int(current_selected_queue_for_llap_cap * 100 / total_cluster_capacity)
+                min_reqd_queue_cap_perc = self.min_queue_perc_reqd_for_llap_and_hive_app(services, hosts, configurations)
+
+                # Validate that the selected queue in 'hive.llap.daemon.queue.name' should be sized >= to minimum required
+                # to run LLAP and Hive2 app.
+                if curr_selected_queue_for_llap_cap_perc < min_reqd_queue_cap_perc:
+                  errMsg1 =  "Selected queue '{0}' capacity ({1}%) is less than minimum required capacity ({2}%) for LLAP " \
+                            "app to run".format(curr_selected_queue_for_llap, curr_selected_queue_for_llap_cap_perc, min_reqd_queue_cap_perc)
+                  validationItems.append({"config-name": "hive.llap.daemon.queue.name","item": self.getErrorItem(errMsg1)})
+              else:
+                Logger.error("Couldn't retrieve '{0}' queue's capacity from 'capacity-scheduler' while doing validation checks for "
+                 "Hive Server Interactive.".format(curr_selected_queue_for_llap))
+
+              # Validate that current selected queue in 'hive.llap.daemon.queue.name' state is not STOPPED.
+              llap_selected_queue_state = self.__getQueueStateFromCapacityScheduler(capacity_scheduler_properties, curr_selected_queue_for_llap)
+              if llap_selected_queue_state:
+                if llap_selected_queue_state == "STOPPED":
+                  errMsg2 =  "Selected queue '{0}' current state is : '{1}'. It is required to be in 'RUNNING' state for LLAP to run"\
+                    .format(curr_selected_queue_for_llap, llap_selected_queue_state)
+                  validationItems.append({"config-name": "hive.llap.daemon.queue.name","item": self.getErrorItem(errMsg2)})
+              else:
+                Logger.error("Couldn't retrieve '{0}' queue's state from 'capacity-scheduler' while doing validation checks for "
+                             "Hive Server Interactive.".format(curr_selected_queue_for_llap))
+            else:
+              Logger.error("Couldn't retrieve current selection for 'hive.llap.daemon.queue.name' while doing validation "
+                           "checks for Hive Server Interactive.")
+          else:
+            Logger.error("Couldn't retrieve 'hive.llap.daemon.queue.name' config from 'hive-interactive-site' while doing "
+                         "validation checks for Hive Server Interactive.")
+            pass
+        else:
+          Logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing validation checks for Hive Server Interactive.")
+          pass
+
+      if self.HIVE_INTERACTIVE_SITE in services['configurations']:
+        # Validate that 'hive.server2.enable.doAs' config is not set to 'true' for Hive2.
+        if 'hive.server2.enable.doAs' in services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']:
+          hive2_enable_do_as = services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']['hive.server2.enable.doAs']
+          if hive2_enable_do_as == 'true':
+            validationItems.append({"config-name": "hive.server2.enable.doAs","item": self.getErrorItem("Value should be set to 'false' for Hive2.")})
+
+        # Validate that 'Maximum Total Concurrent Queries' (hive.server2.tez.sessions.per.default.queue) is not consuming more than
+        # 50% of selected queue for LLAP.
+        if current_selected_queue_for_llap_cap and 'hive.server2.tez.sessions.per.default.queue' in \
+          services['configurations']['hive-interactive-site']['properties']:
+          num_tez_sessions = services['configurations']['hive-interactive-site']['properties']['hive.server2.tez.sessions.per.default.queue']
+          if num_tez_sessions:
+            num_tez_sessions = long(num_tez_sessions)
+            yarn_min_container_size = self.get_yarn_min_container_size(services, configurations)
+            tez_am_container_size = self.calculate_tez_am_container_size(long(total_cluster_capacity))
+            normalized_tez_am_container_size = self._normalizeUp(tez_am_container_size, yarn_min_container_size)
+            llap_selected_queue_cap_remaining = current_selected_queue_for_llap_cap - (normalized_tez_am_container_size * num_tez_sessions)
+            if llap_selected_queue_cap_remaining <= current_selected_queue_for_llap_cap/2:
+              errMsg3 = " Reducing the 'Maximum Total Concurrent Queries' (value: {0}) is advisable as it is consuming more than 50% of " \
+                        "'{1}' queue for LLAP.".format(num_tez_sessions, curr_selected_queue_for_llap)
+              validationItems.append({"config-name": "hive.server2.tez.sessions.per.default.queue","item": self.getWarnItem(errMsg3)})
+
+      # Validate that 'remaining available capacity' in cluster is at least 512 MB, after 'llap' queue is selected,
+      # in order to run Service Checks.
+      if curr_selected_queue_for_llap and curr_selected_queue_for_llap_cap_perc and \
+          curr_selected_queue_for_llap == self.AMBARI_MANAGED_LLAP_QUEUE_NAME:
+        curr_selected_queue_for_llap_cap = float(curr_selected_queue_for_llap_cap_perc) / 100 * total_cluster_capacity
+        available_cap_in_cluster = total_cluster_capacity - curr_selected_queue_for_llap_cap
+        if available_cap_in_cluster < MIN_ASSUMED_CAP_REQUIRED_FOR_SERVICE_CHECKS:
+          errMsg4 =  "Capacity used by '{0}' queue is '{1}'. Service checks may not run as remaining available capacity " \
+                     "({2}) in cluster is less than 512 MB.".format(self.AMBARI_MANAGED_LLAP_QUEUE_NAME, curr_selected_queue_for_llap_cap, available_cap_in_cluster)
+          validationItems.append({"config-name": "hive.llap.daemon.queue.name","item": self.getWarnItem(errMsg4)})
+
+    validationProblems = self.toConfigurationValidationProblems(validationItems, "hive-interactive-site")
+    return validationProblems
+
+  def validateHiveConfigurationsEnv(self, properties, recommendedDefaults, configurations, services, hosts):
+    parentValidationProblems = super(HDP25StackAdvisor, self).validateHiveConfigurationsEnv(properties, recommendedDefaults, configurations, services, hosts)
+    hive_site_properties = self.getSiteProperties(configurations, "hive-site")
+    hive_env_properties = self.getSiteProperties(configurations, "hive-env")
+    validationItems = []
+
+    if 'hive.server2.authentication' in hive_site_properties and hive_site_properties['hive.server2.authentication'] in ["LDAP","PAM"]:
+      if 'alert_ldap_username' not in hive_env_properties or hive_env_properties['alert_ldap_username'] == "":
+        validationItems.append({"config-name": "alert_ldap_username",
+                                "item": self.getWarnItem(
+                                  "Provide a user to be used for alerts. Hive authentication types LDAP and PAM require valid credentials for the alerts.")})
+      if 'alert_ldap_password' not in hive_env_properties or hive_env_properties['alert_ldap_password'] == "":
+        validationItems.append({"config-name": "alert_ldap_password",
+                                "item": self.getWarnItem(
+                                  "Provide the password for the alert user. Hive authentication types LDAP and PAM require valid credentials for the alerts.")})
+
+    validationProblems = self.toConfigurationValidationProblems(validationItems, "hive-env")
+    validationProblems.extend(parentValidationProblems)
+    return validationProblems
+
+  def validateHiveInteractiveEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    hive_site_env_properties = getSiteProperties(configurations, "hive-interactive-env")
+    validationItems = []
+    if self.HIVE_INTERACTIVE_SITE in services['configurations']:
+      hsi_hosts = self.__getHostsForComponent(services, "HIVE", "HIVE_SERVER_INTERACTIVE")
+      if len(hsi_hosts) > 0:
+        # HIVE_SERVER_INTERACTIVE is mapped to a host
+        if 'enable_hive_interactive' not in hive_site_env_properties or (
+              'enable_hive_interactive' in hive_site_env_properties and hive_site_env_properties[
+            'enable_hive_interactive'].lower() != 'true'):
+          validationItems.append({"config-name": "enable_hive_interactive",
+                                  "item": self.getErrorItem(
+                                    "HIVE_SERVER_INTERACTIVE requires enable_hive_interactive in hive-interactive-env set to true.")})
+        if 'hive_server_interactive_host' in hive_site_env_properties:
+          hsi_host = hsi_hosts[0]
+          if hive_site_env_properties['hive_server_interactive_host'].lower() != hsi_host.lower():
+            validationItems.append({"config-name": "hive_server_interactive_host",
+                                    "item": self.getErrorItem(
+                                      "HIVE_SERVER_INTERACTIVE requires hive_server_interactive_host in hive-interactive-env set to its host name.")})
+          pass
+        if 'hive_server_interactive_host' not in hive_site_env_properties:
+          validationItems.append({"config-name": "hive_server_interactive_host",
+                                  "item": self.getErrorItem(
+                                    "HIVE_SERVER_INTERACTIVE requires hive_server_interactive_host in hive-interactive-env set to its host name.")})
+          pass
+
+      else:
+        # no  HIVE_SERVER_INTERACTIVE
+        if 'enable_hive_interactive' in hive_site_env_properties and hive_site_env_properties[
+          'enable_hive_interactive'].lower() != 'false':
+          validationItems.append({"config-name": "enable_hive_interactive",
+                                  "item": self.getErrorItem(
+                                    "enable_hive_interactive in hive-interactive-env should be set to false.")})
+          pass
+        pass
+
+    validationProblems = self.toConfigurationValidationProblems(validationItems, "hive-interactive-env")
+    return validationProblems
+
+  def getServiceConfigurationRecommenderDict(self):
+    parentRecommendConfDict = super(HDP25StackAdvisor, self).getServiceConfigurationRecommenderDict()
+    childRecommendConfDict = {
+      "RANGER": self.recommendRangerConfigurations,
+      "HBASE": self.recommendHBASEConfigurations,
+      "HIVE": self.recommendHIVEConfigurations,
+      "ATLAS": self.recommendAtlasConfigurations,
+      "RANGER_KMS": self.recommendRangerKMSConfigurations,
+      "STORM": self.recommendStormConfigurations,
+      "OOZIE": self.recommendOozieConfigurations,
+      "SPARK2": self.recommendSpark2Configurations
+    }
+    parentRecommendConfDict.update(childRecommendConfDict)
+    return parentRecommendConfDict
+
+  def recommendSpark2Configurations(self, configurations, clusterData, services, hosts):
+    """
+    :type configurations dict
+    :type clusterData dict
+    :type services dict
+    :type hosts dict
+    """
+    putSparkProperty = self.putProperty(configurations, "spark2-defaults", services)
+    putSparkThriftSparkConf = self.putProperty(configurations, "spark2-thrift-sparkconf", services)
+
+    spark_queue = self.recommendYarnQueue(services, "spark2-defaults", "spark.yarn.queue")
+    if spark_queue is not None:
+      putSparkProperty("spark.yarn.queue", spark_queue)
+
+    spart_thrift_queue = self.recommendYarnQueue(services, "spark2-thrift-sparkconf", "spark.yarn.queue")
+    if spart_thrift_queue is not None:
+      putSparkThriftSparkConf("spark.yarn.queue", spart_thrift_queue)
+
+  def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP25StackAdvisor, self).recommendStormConfigurations(configurations, clusterData, services, hosts)
+    storm_site = getServicesSiteProperties(services, "storm-site")
+    putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
+    putStormSiteAttributes = self.putPropertyAttribute(configurations, "storm-site")
+    security_enabled = (storm_site is not None and "storm.zookeeper.superACL" in storm_site)
+    
+    if security_enabled:
+      _storm_principal_name = services['configurations']['storm-env']['properties']['storm_principal_name']
+      storm_bare_jaas_principal = get_bare_principal(_storm_principal_name)
+      if 'nimbus.impersonation.acl' in storm_site:  
+        storm_nimbus_impersonation_acl = storm_site["nimbus.impersonation.acl"]
+        storm_nimbus_impersonation_acl = storm_nimbus_impersonation_acl.replace('{{storm_bare_jaas_principal}}', storm_bare_jaas_principal)
+        putStormSiteProperty('nimbus.impersonation.acl', storm_nimbus_impersonation_acl)
+    rangerPluginEnabled = ''
+    if 'ranger-storm-plugin-properties' in configurations and 'ranger-storm-plugin-enabled' in  configurations['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = configurations['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+    elif 'ranger-storm-plugin-properties' in services['configurations'] and 'ranger-storm-plugin-enabled' in services['configurations']['ranger-storm-plugin-properties']['properties']:
+      rangerPluginEnabled = services['configurations']['ranger-storm-plugin-properties']['properties']['ranger-storm-plugin-enabled']
+
+    storm_authorizer_class = 'org.apache.storm.security.auth.authorizer.SimpleACLAuthorizer'
+    ranger_authorizer_class = 'org.apache.ranger.authorization.storm.authorizer.RangerStormAuthorizer'
+    # Cluster is kerberized
+    if security_enabled:
+      if rangerPluginEnabled and (rangerPluginEnabled.lower() == 'Yes'.lower()):
+        putStormSiteProperty('nimbus.authorizer',ranger_authorizer_class)
+      elif rangerPluginEnabled and (rangerPluginEnabled.lower() == 'No'.lower()) and (services["configurations"]["storm-site"]["properties"]["nimbus.authorizer"] == ranger_authorizer_class):
+        putStormSiteProperty('nimbus.authorizer', storm_authorizer_class)
+    else:
+      putStormSiteAttributes('nimbus.authorizer', 'delete', 'true')
+
+  def constructAtlasRestAddress(self, services, hosts):
+    """
+    :param services: Collection of services in the cluster with configs
+    :param hosts: Collection of hosts in the cluster
+    :return: The suggested property for atlas.rest.address if it is valid, otherwise, None
+    """
+    atlas_rest_address = None
+    services_list = [service["StackServices"]["service_name"] for service in services["services"]]
+    is_atlas_in_cluster = "ATLAS" in services_list
+
+    atlas_server_hosts_info = self.getHostsWithComponent("ATLAS", "ATLAS_SERVER", services, hosts)
+    if is_atlas_in_cluster and atlas_server_hosts_info and len(atlas_server_hosts_info) > 0:
+      # Multiple Atlas Servers can exist, so sort by hostname to create deterministic csv
+      atlas_host_names = [e['Hosts']['host_name'] for e in atlas_server_hosts_info]
+      if len(atlas_host_names) > 1:
+        atlas_host_names = sorted(atlas_host_names)
+
+      scheme = "http"
+      metadata_port = "21000"
+      atlas_server_default_https_port = "21443"
+      tls_enabled = "false"
+      if 'application-properties' in services['configurations']:
+        if 'atlas.enableTLS' in services['configurations']['application-properties']['properties']:
+          tls_enabled = services['configurations']['application-properties']['properties']['atlas.enableTLS']
+        if 'atlas.server.http.port' in services['configurations']['application-properties']['properties']:
+          metadata_port = str(services['configurations']['application-properties']['properties']['atlas.server.http.port'])
+
+        if str(tls_enabled).lower() == "true":
+          scheme = "https"
+          if 'atlas.server.https.port' in services['configurations']['application-properties']['properties']:
+            metadata_port = str(services['configurations']['application-properties']['properties']['atlas.server.https.port'])
+          else:
+            metadata_port = atlas_server_default_https_port
+
+      atlas_rest_address_list = ["{0}://{1}:{2}".format(scheme, hostname, metadata_port) for hostname in atlas_host_names]
+      atlas_rest_address = ",".join(atlas_rest_address_list)
+      Logger.info("Constructing atlas.rest.address=%s" % atlas_rest_address)
+    return atlas_rest_address
+
+  def recommendAtlasConfigurations(self, configurations, clusterData, services, hosts):
+    putAtlasApplicationProperty = self.putProperty(configurations, "application-properties", services)
+    putAtlasRangerPluginProperty = self.putProperty(configurations, "ranger-atlas-plugin-properties", services)
+    putAtlasEnvProperty = self.putProperty(configurations, "atlas-env", services)
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    # Generate atlas.rest.address since the value is always computed
+    atlas_rest_address = self.constructAtlasRestAddress(services, hosts)
+    if atlas_rest_address is not None:
+      putAtlasApplicationProperty("atlas.rest.address", atlas_rest_address)
+
+    if "AMBARI_INFRA" in servicesList and 'infra-solr-env' in services['configurations']:
+      if 'infra_solr_znode' in services['configurations']['infra-solr-env']['properties']:
+        infra_solr_znode = services['configurations']['infra-solr-env']['properties']['infra_solr_znode']
+      else:
+        infra_solr_znode = None
+
+      zookeeper_hosts = self.getHostNamesWithComponent("ZOOKEEPER", "ZOOKEEPER_SERVER", services)
+      zookeeper_host_arr = []
+
+      zookeeper_port = self.getZKPort(services)
+      for i in range(len(zookeeper_hosts)):
+        zookeeper_host = zookeeper_hosts[i] + ':' + zookeeper_port
+        if infra_solr_znode is not None:
+          zookeeper_host += infra_solr_znode
+        zookeeper_host_arr.append(zookeeper_host)
+
+      solr_zookeeper_url = ",".join(zookeeper_host_arr)
+
+      putAtlasApplicationProperty('atlas.graph.index.search.solr.zookeeper-url', solr_zookeeper_url)
+    else:
+      putAtlasApplicationProperty('atlas.graph.index.search.solr.zookeeper-url', "")
+
+    # Kafka section
+    if "KAFKA" in servicesList and 'kafka-broker' in services['configurations']:
+      kafka_hosts = self.getHostNamesWithComponent("KAFKA", "KAFKA_BROKER", services)
+
+      if 'port' in services['configurations']['kafka-broker']['properties']:
+        kafka_broker_port = services['configurations']['kafka-broker']['properties']['port']
+      else:
+        kafka_broker_port = '6667'
+
+      kafka_host_arr = []
+      for i in range(len(kafka_hosts)):
+        kafka_host_arr.append(kafka_hosts[i] + ':' + kafka_broker_port)
+
+      kafka_bootstrap_servers = ",".join(kafka_host_arr)
+
+      if 'zookeeper.connect' in services['configurations']['kafka-broker']['properties']:
+        kafka_zookeeper_connect = services['configurations']['kafka-broker']['properties']['zookeeper.connect']
+      else:
+        kafka_zookeeper_connect = None
+
+      putAtlasApplicationProperty('atlas.kafka.bootstrap.servers', kafka_bootstrap_servers)
+      putAtlasApplicationProperty('atlas.kafka.zookeeper.connect', kafka_zookeeper_connect)
+    else:
+      putAtlasApplicationProperty('atlas.kafka.bootstrap.servers', "")
+      putAtlasApplicationProperty('atlas.kafka.zookeeper.connect', "")
+
+    if "HBASE" in servicesList and 'hbase-site' in services['configurations']:
+      if 'hbase.zookeeper.quorum' in services['configurations']['hbase-site']['properties']:
+        hbase_zookeeper_quorum = services['configurations']['hbase-site']['properties']['hbase.zookeeper.quorum']
+      else:
+        hbase_zookeeper_quorum = ""
+
+      putAtlasApplicationProperty('atlas.graph.storage.hostname', hbase_zookeeper_quorum)
+      putAtlasApplicationProperty('atlas.audit.hbase.zookeeper.quorum', hbase_zookeeper_quorum)
+    else:
+      putAtlasApplicationProperty('atlas.graph.storage.hostname', "")
+      putAtlasApplicationProperty('atlas.audit.hbase.zookeeper.quorum', "")
+
+    if "ranger-env" in services["configurations"] and "ranger-atlas-plugin-properties" in services["configurations"] and \
+        "ranger-atlas-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
+      ranger_atlas_plugin_enabled = services["configurations"]["ranger-env"]["properties"]["ranger-atlas-plugin-enabled"]
+      putAtlasRangerPluginProperty('ranger-atlas-plugin-enabled', ranger_atlas_plugin_enabled)
+
+    ranger_atlas_plugin_enabled = ''
+    if 'ranger-atlas-plugin-properties' in configurations and 'ranger-atlas-plugin-enabled' in configurations['ranger-atlas-plugin-properties']['properties']:
+      ranger_atlas_plugin_enabled = configurations['ranger-atlas-plugin-properties']['properties']['ranger-atlas-plugin-enabled']
+    elif 'ranger-atlas-plugin-properties' in services['configurations'] and 'ranger-atlas-plugin-enabled' in services['configurations']['ranger-atlas-plugin-properties']['properties']:
+      ranger_atlas_plugin_enabled = services['configurations']['ranger-atlas-plugin-properties']['properties']['ranger-atlas-plugin-enabled']
+
+    if ranger_atlas_plugin_enabled and (ranger_atlas_plugin_enabled.lower() == 'Yes'.lower()):
+      putAtlasApplicationProperty('atlas.authorizer.impl','ranger')
+    else:
+      putAtlasApplicationProperty('atlas.authorizer.impl','simple')
+
+    #atlas server memory settings
+    if 'atlas-env' in services['configurations']:
+      atlas_server_metadata_size = 50000
+      if 'atlas_server_metadata_size' in services['configurations']['atlas-env']['properties']:
+        atlas_server_metadata_size = int(services['configurations']['atlas-env']['properties']['atlas_server_metadata_size'])
+
+      atlas_server_xmx = 2048
+
+      if 300000 <= atlas_server_metadata_size < 500000:
+        atlas_server_xmx = 1024*5
+      if 500000 <= atlas_server_metadata_size < 1000000:
+        atlas_server_xmx = 1024*10
+      if atlas_server_metadata_size >= 1000000:
+        atlas_server_xmx = 1024*16
+
+      atlas_server_max_new_size = (atlas_server_xmx / 100) * 30
+
+      putAtlasEnvProperty("atlas_server_xmx", atlas_server_xmx)
+      putAtlasEnvProperty("atlas_server_max_new_size", atlas_server_max_new_size)
+
+  def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP25StackAdvisor, self).recommendHBASEConfigurations(configurations, clusterData, services, hosts)
+    putHbaseSiteProperty = self.putProperty(configurations, "hbase-site", services)
+    appendCoreSiteProperty = self.updateProperty(configurations, "core-site", services)
+
+    if "cluster-env" in services["configurations"] \
+         and "security_enabled" in services["configurations"]["cluster-env"]["properties"] \
+         and services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true":
+      # Set the master's UI to readonly
+      putHbaseSiteProperty('hbase.master.ui.readonly', 'true')
+
+      phoenix_query_server_hosts = self.get_phoenix_query_server_hosts(services, hosts)
+      Logger.debug("Calculated Phoenix Query Server hosts: %s" % str(phoenix_query_server_hosts))
+      if phoenix_query_server_hosts:
+        Logger.debug("Attempting to update hadoop.proxyuser.HTTP.hosts with %s" % str(phoenix_query_server_hosts))
+        # The PQS hosts we want to ensure are set
+        new_value = ','.join(phoenix_query_server_hosts)
+        # Compute the unique set of hosts for the property
+        def updateCallback(originalValue, newValue):
+          Logger.debug("Original hadoop.proxyuser.HTTP.hosts value %s, appending %s" % (originalValue, newValue))
+          # Only update the original value if it's not whitespace only
+          if originalValue and not originalValue.isspace():
+            hosts = originalValue.split(',')
+            # Add in the new hosts if we have some
+            if newValue and not newValue.isspace():
+              hosts.extend(newValue.split(','))
+            # Return the combined (uniqued) list of hosts
+            result = ','.join(set(hosts))
+            Logger.debug("Setting final to %s" % result)
+            return result
+          else:
+            Logger.debug("Setting final value to %s" % newValue)
+            return newValue
+        # Update the proxyuser setting, deferring to our callback to merge results together
+        appendCoreSiteProperty('hadoop.proxyuser.HTTP.hosts', new_value, updateCallback)
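+        # Illustrative example (hypothetical host names): if hadoop.proxyuser.HTTP.hosts already
+        # holds "host1.example.com" and the PQS hosts are "host2.example.com,host1.example.com",
+        # updateCallback returns the de-duplicated union of both lists, e.g.
+        # "host1.example.com,host2.example.com" (set ordering is not guaranteed).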
+      else:
+        Logger.debug("No phoenix query server hosts to update")
+    else:
+      putHbaseSiteProperty('hbase.master.ui.readonly', 'false')
+
+  """
+  Returns the list of Phoenix Query Server host names, or None.
+  """
+  def get_phoenix_query_server_hosts(self, services, hosts):
+    if len(hosts['items']) > 0:
+      phoenix_query_server_hosts = self.getHostsWithComponent("HBASE", "PHOENIX_QUERY_SERVER", services, hosts)
+      if phoenix_query_server_hosts is None:
+        return []
+      return [host['Hosts']['host_name'] for host in phoenix_query_server_hosts]
+
+  def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP25StackAdvisor, self).recommendHIVEConfigurations(configurations, clusterData, services, hosts)
+    putHiveInteractiveEnvProperty = self.putProperty(configurations, "hive-interactive-env", services)
+    putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
+    putHiveInteractiveEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hive-interactive-env")
+
+    # For 'Hive Server Interactive', if the component exists.
+    hsi_hosts = self.__getHostsForComponent(services, "HIVE", "HIVE_SERVER_INTERACTIVE")
+    if len(hsi_hosts) > 0:
+      hsi_host = hsi_hosts[0]
+      putHiveInteractiveEnvProperty('enable_hive_interactive', 'true')
+      putHiveInteractiveEnvProperty('hive_server_interactive_host', hsi_host)
+
+      # Update 'hive.llap.daemon.queue.name' property attributes if capacity scheduler is changed.
+      if self.HIVE_INTERACTIVE_SITE in services['configurations']:
+        if 'hive.llap.daemon.queue.name' in services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']:
+          self.setLlapDaemonQueuePropAttributesAndCapSliderVisibility(services, configurations)
+
+          # Update 'hive.server2.tez.default.queues' value
+          hive_tez_default_queue = None
+          if 'hive-interactive-site' in configurations and \
+              'hive.llap.daemon.queue.name' in configurations[self.HIVE_INTERACTIVE_SITE]['properties']:
+            hive_tez_default_queue = configurations[self.HIVE_INTERACTIVE_SITE]['properties']['hive.llap.daemon.queue.name']
+            Logger.info("'hive.llap.daemon.queue.name' value from configurations : '{0}'".format(hive_tez_default_queue))
+          if not hive_tez_default_queue:
+            hive_tez_default_queue = services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']['hive.llap.daemon.queue.name']
+            Logger.info("'hive.llap.daemon.queue.name' value from services : '{0}'".format(hive_tez_default_queue))
+          if hive_tez_default_queue:
+            putHiveInteractiveSiteProperty("hive.server2.tez.default.queues", hive_tez_default_queue)
+            Logger.info("Updated 'hive.server2.tez.default.queues' config : '{0}'".format(hive_tez_default_queue))
+    else:
+      putHiveInteractiveEnvProperty('enable_hive_interactive', 'false')
+      putHiveInteractiveEnvPropertyAttribute("llap_queue_capacity", "visible", "false")
+
+    if self.HIVE_INTERACTIVE_SITE in services['configurations'] and \
+        'hive.llap.zk.sm.connectionString' in services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']:
+      # Fill the property 'hive.llap.zk.sm.connectionString' required by Hive Server Interactive (HiveServer2)
+      zookeeper_host_port = self.getZKHostPortString(services)
+      if zookeeper_host_port:
+        putHiveInteractiveSiteProperty("hive.llap.zk.sm.connectionString", zookeeper_host_port)
+    pass
+
+  def recommendYARNConfigurations(self, configurations, clusterData, services, hosts):
+    super(HDP25StackAdvisor, self).recommendYARNConfigurations(configurations, clusterData, services, hosts)
+
+    # Queue 'llap' creation/removal logic (Used by Hive Interactive server and associated LLAP)
+    if 'hive-interactive-env' in services['configurations'] and \
+        'enable_hive_interactive' in services['configurations']['hive-interactive-env']['properties']:
+      enable_hive_interactive = services['configurations']['hive-interactive-env']['properties']['enable_hive_interactive']
+      LLAP_QUEUE_NAME = 'llap'
+
+      # Hive Server interactive is already added or getting added
+      if enable_hive_interactive == 'true':
+        self.checkAndManageLlapQueue(services, configurations, hosts, LLAP_QUEUE_NAME)
+        self.updateLlapConfigs(configurations, services, hosts, LLAP_QUEUE_NAME)
+      else:  # When Hive Interactive Server is in 'off/removed' state.
+        self.checkAndStopLlapQueue(services, configurations, LLAP_QUEUE_NAME)
+
+    putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
+    stack_root = "/usr/hdp"
+    if "cluster-env" in services["configurations"] and "stack_root" in services["configurations"]["cluster-env"]["properties"]:
+      stack_root = services["configurations"]["cluster-env"]["properties"]["stack_root"]
+
+    timeline_plugin_classes_values = []
+    timeline_plugin_classpath_values = []
+
+    if self.__isServiceDeployed(services, "TEZ"):
+      timeline_plugin_classes_values.append('org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl')
+
+    if self.__isServiceDeployed(services, "SPARK"):
+      timeline_plugin_classes_values.append('org.apache.spark.deploy.history.yarn.plugin.SparkATSPlugin')
+      timeline_plugin_classpath_values.append(stack_root + "/${hdp.version}/spark/hdpLib/*")
+
+    putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes', ",".join(timeline_plugin_classes_values))
+    putYarnSiteProperty('yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath', ":".join(timeline_plugin_classpath_values))
+
+  """
+  Entry point for updating Hive's 'LLAP app' configs namely : (1). num_llap_nodes (2). hive.llap.daemon.yarn.container.mb
+  (3). hive.llap.daemon.num.executors (4). hive.llap.io.memory.size (5). llap_heap_size (6). slider_am_container_mb,
+  and (7). hive.server2.tez.sessions.per.default.queue
+
+    The trigger point for updating LLAP configs (mentioned above) is change in values of any of the following:
+    (1). 'enable_hive_interactive' set to 'true' (2). 'llap_queue_capacity' (3). 'hive.server2.tez.sessions.per.default.queue'
+    (4). Change in queue selection for config 'hive.llap.daemon.queue.name'.
+
+    If a change in value for 'llap_queue_capacity' or 'hive.server2.tez.sessions.per.default.queue' is detected, that config
+    value is not calculated, but read and used in the calculation of the dependent configs.
+  """
+  def updateLlapConfigs(self, configurations, services, hosts, llap_queue_name):
+    putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
+    putHiveInteractiveSitePropertyAttribute = self.putPropertyAttribute(configurations, self.HIVE_INTERACTIVE_SITE)
+
+    putHiveInteractiveEnvProperty = self.putProperty(configurations, "hive-interactive-env", services)
+    putHiveInteractiveEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hive-interactive-env")
+
+    putTezInteractiveSiteProperty = self.putProperty(configurations, "tez-interactive-site", services)
+
+    llap_daemon_selected_queue_name = None
+    llap_queue_selected_in_current_call = None
+    LLAP_MAX_CONCURRENCY = 32 # Allow a max of 32 concurrency.
+
+    # Update 'hive.llap.daemon.queue.name' prop combo entries and llap capacity slider visibility.
+    self.setLlapDaemonQueuePropAttributesAndCapSliderVisibility(services, configurations)
+
+    if not services["changed-configurations"]:
+      read_llap_daemon_yarn_cont_mb = long(self.get_yarn_min_container_size(services, configurations))
+      putHiveInteractiveSiteProperty('hive.llap.daemon.yarn.container.mb', read_llap_daemon_yarn_cont_mb)
+      # initial memory setting to make sure hive.llap.daemon.yarn.container.mb >= yarn.scheduler.minimum-allocation-mb
+      Logger.info("Adjusted 'hive.llap.daemon.yarn.container.mb' to yarn min container size as initial size "
+                 "(" + str(self.get_yarn_min_container_size(services, configurations)) + " MB).")
+
+    try:
+      if self.HIVE_INTERACTIVE_SITE in services['configurations'] and \
+          'hive.llap.daemon.queue.name' in services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']:
+        llap_daemon_selected_queue_name =  services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']['hive.llap.daemon.queue.name']
+
+      if 'hive.llap.daemon.queue.name' in configurations[self.HIVE_INTERACTIVE_SITE]['properties']:
+        llap_queue_selected_in_current_call = configurations[self.HIVE_INTERACTIVE_SITE]['properties']['hive.llap.daemon.queue.name']
+
+      # Update Visibility of 'llap_queue_capacity' slider.
+      capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services)
+      if capacity_scheduler_properties:
+        # Get all leaf queues.
+        leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)
+        if len(leafQueueNames) == 2 and \
+          ((llap_daemon_selected_queue_name != None and llap_daemon_selected_queue_name == llap_queue_name) or \
+           (llap_queue_selected_in_current_call != None and llap_queue_selected_in_current_call == llap_queue_name)):
+            putHiveInteractiveEnvPropertyAttribute("llap_queue_capacity", "visible", "true")
+            Logger.info("Selected YARN queue is '{0}'. Setting LLAP queue capacity slider visibility to 'True'".format(llap_queue_name))
+        else:
+          putHiveInteractiveEnvPropertyAttribute("llap_queue_capacity", "visible", "false")
+          Logger.info("Queue selected for LLAP app is : '{0}'. Current YARN queues : {1}. Setting '{2}' queue capacity slider "
+                      "visibility to 'False'.".format(llap_daemon_selected_queue_name, list(leafQueueNames), llap_queue_name))
+        if llap_daemon_selected_queue_name:
+          llap_selected_queue_state = self.__getQueueStateFromCapacityScheduler(capacity_scheduler_properties, llap_daemon_selected_queue_name)
+          if llap_selected_queue_state == None or llap_selected_queue_state == "STOPPED":
+            putHiveInteractiveEnvPropertyAttribute("llap_queue_capacity", "visible", "false")
+            raise Fail("Selected LLAP app queue '{0}' current state is : '{1}'. Setting LLAP configs to default values "
+                       "and 'llap' queue capacity slider visibility to 'False'."
+                       .format(llap_daemon_selected_queue_name, llap_selected_queue_state))
+        else:
+          raise Fail("Retrieved LLAP app queue name is : '{0}'. Setting LLAP configs to default values."
+                     .format(llap_daemon_selected_queue_name))
+      else:
+        Logger.error("Couldn't retrieve 'capacity-scheduler' properties while doing YARN queue adjustment for Hive Server Interactive."
+                     " Not calculating LLAP configs.")
+        return
+
+      changed_configs_in_hive_int_env = None
+      llap_concurrency_in_changed_configs = None
+      llap_daemon_queue_in_changed_configs = None
+      # Calculations are triggered only if there is change in any one of the following props :
+      # 'llap_queue_capacity', 'enable_hive_interactive', 'hive.server2.tez.sessions.per.default.queue'
+      # or 'hive.llap.daemon.queue.name' has change in value selection.
+      # OR
+      # services['changed-configurations'] is empty implying that this is the Blueprint call. (1st invocation)
+      if 'changed-configurations' in services.keys():
+        config_names_to_be_checked = set(['llap_queue_capacity', 'enable_hive_interactive'])
+        changed_configs_in_hive_int_env = self.are_config_props_in_changed_configs(services, "hive-interactive-env",
+                                                                                   config_names_to_be_checked, False)
+
+        # Determine if there is change detected in "hive-interactive-site's" configs based on which we calculate llap configs.
+        llap_concurrency_in_changed_configs = self.are_config_props_in_changed_configs(services, "hive-interactive-site",
+                                                                                       set(['hive.server2.tez.sessions.per.default.queue']), False)
+        llap_daemon_queue_in_changed_configs = self.are_config_props_in_changed_configs(services, "hive-interactive-site",
+                                                                                       set(['hive.llap.daemon.queue.name']), False)
+
+      if not changed_configs_in_hive_int_env and \
+        not llap_concurrency_in_changed_configs and \
+        not llap_daemon_queue_in_changed_configs and \
+        services["changed-configurations"]:
+        Logger.info("LLAP parameters not modified. Not adjusting LLAP configs.")
+        Logger.info("Current 'changed-configuration' received is : {0}".format(services["changed-configurations"]))
+        return
+
+      node_manager_host_list = self.get_node_manager_hosts(services, hosts)
+      node_manager_cnt = len(node_manager_host_list)
+      yarn_nm_mem_in_mb = self.get_yarn_nm_mem_in_mb(services, configurations)
+      total_cluster_capacity = node_manager_cnt * yarn_nm_mem_in_mb
+      Logger.info("\n\nCalculated total_cluster_capacity : {0}, using following : node_manager_cnt : {1}, "
+                  "yarn_nm_mem_in_mb : {2}".format(total_cluster_capacity, node_manager_cnt, yarn_nm_mem_in_mb))
+
+      # Check which queue is selected in 'hive.llap.daemon.queue.name', to determine current queue capacity
+      current_selected_queue_for_llap_cap = None
+      yarn_root_queues = capacity_scheduler_properties.get("yarn.scheduler.capacity.root.queues")
+      if (llap_queue_selected_in_current_call == llap_queue_name \
+        or llap_daemon_selected_queue_name == llap_queue_name) \
+        and (llap_queue_name in yarn_root_queues and len(leafQueueNames) == 2):
+        current_selected_queue_for_llap_cap_perc = self.get_llap_cap_percent_slider(services, configurations)
+        current_selected_queue_for_llap_cap = current_selected_queue_for_llap_cap_perc / 100 * total_cluster_capacity
+      else:  # any queue other than 'llap'
+        current_selected_queue_for_llap_cap = self.__getSelectedQueueTotalCap(capacity_scheduler_properties,
+                                                                              llap_daemon_selected_queue_name, total_cluster_capacity)
+      assert (current_selected_queue_for_llap_cap >= 1), "Current selected queue '{0}' capacity value : {1}. Expected value : >= 1" \
+        .format(llap_daemon_selected_queue_name, current_selected_queue_for_llap_cap)
+      yarn_min_container_size = self.get_yarn_min_container_size(services, configurations)
+      tez_am_container_size = self.calculate_tez_am_container_size(long(total_cluster_capacity))
+      normalized_tez_am_container_size = self._normalizeUp(tez_am_container_size, yarn_min_container_size)
+      Logger.info("Calculated normalized_tez_am_container_size : {0}, using following : tez_am_container_size : {1}, "
+                  "total_cluster_capacity : {2}".format(normalized_tez_am_container_size, tez_am_container_size,
+                                                        total_cluster_capacity))
+      normalized_selected_queue_for_llap_cap = long(self._normalizeDown(current_selected_queue_for_llap_cap, yarn_min_container_size))
+
+      # Get calculated value for Slider AM container Size
+      slider_am_container_size = self._normalizeUp(self.calculate_slider_am_size(yarn_min_container_size),
+                                                   yarn_min_container_size)
+
+      # Read 'hive.server2.tez.sessions.per.default.queue' prop if it's in changed-configs, else calculate it.
+      if not llap_concurrency_in_changed_configs:
+        # Calculate llap concurrency (i.e. Number of Tez AM's)
+        llap_concurrency = float(normalized_selected_queue_for_llap_cap * 0.25 / normalized_tez_am_container_size)
+        llap_concurrency = max(long(llap_concurrency), 1)
+        Logger.info("Calculated llap_concurrency : {0}, using following : normalized_selected_queue_for_llap_cap : {1}, "
+                    "normalized_tez_am_container_size : {2}".format(llap_concurrency, normalized_selected_queue_for_llap_cap,
+                                                                    normalized_tez_am_container_size))
+        # Limit 'llap_concurrency' to reach a max. of 32.
+        if llap_concurrency > LLAP_MAX_CONCURRENCY:
+          llap_concurrency = LLAP_MAX_CONCURRENCY
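+        # Worked example (illustrative numbers only): with normalized_selected_queue_for_llap_cap =
+        # 40960 MB and normalized_tez_am_container_size = 512 MB, llap_concurrency =
+        # max(long(40960 * 0.25 / 512), 1) = 20, which stays below LLAP_MAX_CONCURRENCY (32).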
+      else:
+        # Read current value
+        if 'hive.server2.tez.sessions.per.default.queue' in services['configurations'][self.HIVE_INTERACTIVE_SITE][
+          'properties']:
+          llap_concurrency = long(services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties'][
+                                    'hive.server2.tez.sessions.per.default.queue'])
+          assert (
+          llap_concurrency >= 1), "'hive.server2.tez.sessions.per.default.queue' current value : {0}. Expected value : >= 1" \
+            .format(llap_concurrency)
+        else:
+          raise Fail(
+            "Couldn't retrieve Hive Server interactive's 'hive.server2.tez.sessions.per.default.queue' config.")
+
+
+      # Calculate 'total memory available for llap daemons' across cluster
+      total_am_capacity_required = normalized_tez_am_container_size * llap_concurrency + slider_am_container_size
+      cap_available_for_daemons = normalized_selected_queue_for_llap_cap - total_am_capacity_required
+      Logger.info(
+        "Calculated cap_available_for_daemons : {0}, using following : current_selected_queue_for_llap_cap : {1}, "
+        "yarn_nm_mem_in_mb : {2}, total_cluster_capacity : {3}, normalized_selected_queue_for_llap_cap : {4}, normalized_tez_am_container_size"
+        " : {5}, yarn_min_container_size : {6}, llap_concurrency : {7}, total_am_capacity_required : {8}"
+        .format(cap_available_for_daemons, current_selected_queue_for_llap_cap, yarn_nm_mem_in_mb,
+                total_cluster_capacity,
+                normalized_selected_queue_for_llap_cap, normalized_tez_am_container_size, yarn_min_container_size, llap_concurrency,
+                total_am_capacity_required))
+      if cap_available_for_daemons < yarn_min_container_size:
+        raise Fail(
+          "'Capacity available for LLAP daemons'({0}) < 'YARN minimum container size'({1}). Invalid configuration detected. "
+          "Increase LLAP queue size.".format(cap_available_for_daemons, yarn_min_container_size))
+
+
+
+      # Calculate value for 'num_llap_nodes', an across cluster config.
+      # Also, get calculated value for 'hive.llap.daemon.yarn.container.mb' based on 'num_llap_nodes' value, a per node config.
+      num_llap_nodes_raw = cap_available_for_daemons / yarn_nm_mem_in_mb
+      if num_llap_nodes_raw < 1.00:
+        # Set the llap nodes to min. value of 1 and 'llap_container_size' to min. YARN allocation.
+        num_llap_nodes = 1
+        llap_container_size = self._normalizeUp(cap_available_for_daemons, yarn_min_container_size)
+        Logger.info("Calculated llap_container_size : {0}, using following : cap_available_for_daemons : {1}, "
+                    "yarn_min_container_size : {2}".format(llap_container_size, cap_available_for_daemons,
+                                                           yarn_min_container_size))
+      else:
+        num_llap_nodes = math.floor(num_llap_nodes_raw)
+        llap_container_size = self._normalizeDown(yarn_nm_mem_in_mb, yarn_min_container_size)
+        Logger.info("Calculated llap_container_size : {0}, using following : yarn_nm_mem_in_mb : {1}, "
+                    "yarn_min_container_size : {2}".format(llap_container_size, yarn_nm_mem_in_mb,
+                                                           yarn_min_container_size))
+      Logger.info(
+        "Calculated num_llap_nodes : {0} using following : yarn_nm_mem_in_mb : {1}, cap_available_for_daemons : {2} " \
+        .format(num_llap_nodes, yarn_nm_mem_in_mb, cap_available_for_daemons))
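+      # Worked example (illustrative numbers only): with cap_available_for_daemons = 30208 MB,
+      # yarn_nm_mem_in_mb = 10240 MB and yarn_min_container_size = 512.0 MB, num_llap_nodes_raw = 2.95,
+      # so num_llap_nodes = 2 and llap_container_size = _normalizeDown(10240, 512.0) = 10240 MB.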
+
+
+      # Calculate value for 'hive.llap.daemon.num.executors', a per node config.
+      hive_tez_container_size = self.get_hive_tez_container_size(services, configurations)
+      if 'yarn.nodemanager.resource.cpu-vcores' in services['configurations']['yarn-site']['properties']:
+        cpu_per_nm_host = float(services['configurations']['yarn-site']['properties'][
+                                  'yarn.nodemanager.resource.cpu-vcores'])
+        assert (cpu_per_nm_host > 0), "'yarn.nodemanager.resource.cpu-vcores' current value : {0}. Expected value : > 0" \
+          .format(cpu_per_nm_host)
+      else:
+        raise Fail("Couldn't retrieve YARN's 'yarn.nodemanager.resource.cpu-vcores' config.")
+
+      num_executors_per_node_raw = math.floor(llap_container_size / hive_tez_container_size)
+      num_executors_per_node = min(num_executors_per_node_raw, cpu_per_nm_host)
+      Logger.info("calculated num_executors_per_node: {0}, using following :  hive_tez_container_size : {1}, "
+                  "cpu_per_nm_host : {2}, num_executors_per_node_raw : {3}, llap_container_size : {4}"
+                  .format(num_executors_per_node, hive_tez_container_size, cpu_per_nm_host, num_executors_per_node_raw,
+                          llap_container_size))
+      assert (num_executors_per_node > 0), "'Number of executors per node' : {0}. Expected value : > 0".format(
+        num_executors_per_node)
+
+      total_mem_for_executors = num_executors_per_node * hive_tez_container_size
+
+      # Calculate value for 'cache' (hive.llap.io.memory.size), a per node config.
+      cache_size_per_node = llap_container_size - total_mem_for_executors
+      Logger.info(
+        "Calculated cache_size_per_node : {0} using following : hive_container_size : {1}, llap_container_size"
+        " : {2}, num_executors_per_node : {3}"
+        .format(cache_size_per_node, hive_tez_container_size, llap_container_size, num_executors_per_node))
+      if cache_size_per_node < 0:  # Run with '0' cache.
+        Logger.info(
+          "Calculated 'cache_size_per_node' : {0}. Setting 'cache_size_per_node' to 0.".format(cache_size_per_node))
+        cache_size_per_node = 0
+
+
+      # Calculate value for prop 'llap_heap_size'
+      llap_xmx = max(total_mem_for_executors * 0.8, total_mem_for_executors - self.get_llap_headroom_space(services, configurations))
+      Logger.info("Calculated llap_app_heap_size : {0}, using following : hive_container_size : {1}, "
+                  "total_mem_for_executors : {2}".format(llap_xmx, hive_tez_container_size, total_mem_for_executors))
+
+
+      # Updating calculated configs.
+      normalized_tez_am_container_size = long(normalized_tez_am_container_size)
+      putTezInteractiveSiteProperty('tez.am.resource.memory.mb', normalized_tez_am_container_size)
+      Logger.info("'Tez for Hive2' config 'tez.am.resource.memory.mb' updated. Current: {0}".format(
+        normalized_tez_am_container_size))
+
+      if not llap_concurrency_in_changed_configs:
+        min_llap_concurrency = 1
+        putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', llap_concurrency)
+        putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "minimum",
+                                                min_llap_concurrency)
+        putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "maximum",
+                                                LLAP_MAX_CONCURRENCY)
+        Logger.info(
+          "Hive2 config 'hive.server2.tez.sessions.per.default.queue' updated. Min : {0}, Current: {1}, Max: {2}" \
+          .format(min_llap_concurrency, llap_concurrency, LLAP_MAX_CONCURRENCY))
+
+      num_llap_nodes = long(num_llap_nodes)
+
+      putHiveInteractiveEnvProperty('num_llap_nodes', num_llap_nodes)
+      Logger.info("LLAP config 'num_llap_nodes' updated. Current: {0}".format(num_llap_nodes))
+
+      llap_container_size = long(llap_container_size)
+      putHiveInteractiveSiteProperty('hive.llap.daemon.yarn.container.mb', llap_container_size)
+      Logger.info("LLAP config 'hive.llap.daemon.yarn.container.mb' updated. Current: {0}".format(llap_container_size))
+
+      num_executors_per_node = long(num_executors_per_node)
+      putHiveInteractiveSiteProperty('hive.llap.daemon.num.executors', num_executors_per_node)
+      Logger.info("LLAP config 'hive.llap.daemon.num.executors' updated. Current: {0}".format(num_executors_per_node))
+      # 'hive.llap.io.threadpool.size' config value is to be set same as value calculated for
+      # 'hive.llap.daemon.num.executors' at all times.
+      putHiveInteractiveSiteProperty('hive.llap.io.threadpool.size', num_executors_per_node)
+      Logger.info("LLAP config 'hive.llap.io.threadpool.size' updated. Current: {0}".format(num_executors_per_node))
+
+      cache_size_per_node = long(cache_size_per_node)
+      putHiveInteractiveSiteProperty('hive.llap.io.memory.size', cache_size_per_node)
+      Logger.info("LLAP config 'hive.llap.io.memory.size' updated. Current: {0}".format(cache_size_per_node))
+      llap_io_enabled = 'false'
+      if cache_size_per_node >= 64:
+        llap_io_enabled = 'true'
+
+      putHiveInteractiveSiteProperty('hive.llap.io.enabled', llap_io_enabled)
+      Logger.info("Hive2 config 'hive.llap.io.enabled' updated to '{0}' as part of "
+                  "'hive.llap.io.memory.size' calculation.".format(llap_io_enabled))
+
+      llap_xmx = long(llap_xmx)
+      putHiveInteractiveEnvProperty('llap_heap_size', llap_xmx)
+      Logger.info("LLAP config 'llap_heap_size' updated. Current: {0}".format(llap_xmx))
+
+      slider_am_container_size = long(slider_am_container_size)
+      putHiveInteractiveEnvProperty('slider_am_container_mb', slider_am_container_size)
+      Logger.info("LLAP config 'slider_am_container_mb' updated. Current: {0}".format(slider_am_container_size))
+
+    except Exception as e:
+      # Set default values if an Exception was caught. The 'llap queue capacity' is left untouched, as it can be increased,
+      # triggering recalculation.
+      Logger.info(e.message+" Skipping calculating LLAP configs. Setting them to minimum values.")
+      traceback.print_exc()
+
+      try:
+        yarn_min_container_size = long(self.get_yarn_min_container_size(services, configurations))
+        slider_am_container_size = long(self.calculate_slider_am_size(yarn_min_container_size))
+
+        node_manager_host_list = self.get_node_manager_hosts(services, hosts)
+        node_manager_cnt = len(node_manager_host_list)
+
+        putHiveInteractiveSiteProperty('hive.server2.tez.sessions.per.default.queue', 1)
+        putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "minimum", 1)
+        putHiveInteractiveSitePropertyAttribute('hive.server2.tez.sessions.per.default.queue', "maximum", 32)
+
+        putHiveInteractiveEnvProperty('num_llap_nodes', 0)
+        putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "minimum", 1)
+        putHiveInteractiveEnvPropertyAttribute('num_llap_nodes', "maximum", node_manager_cnt)
+
+        putHiveInteractiveSiteProperty('hive.llap.daemon.yarn.container.mb', yarn_min_container_size)
+        putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.yarn.container.mb', "minimum", yarn_min_container_size)
+
+        putHiveInteractiveSiteProperty('hive.llap.daemon.num.executors', 0)
+        putHiveInteractiveSitePropertyAttribute('hive.llap.daemon.num.executors', "minimum", 1)
+
+        putHiveInteractiveSiteProperty('hive.llap.io.threadpool.size', 0)
+
+        putHiveInteractiveSiteProperty('hive.llap.io.memory.size', 0)
+
+        putHiveInteractiveEnvProperty('llap_heap_size', 0)
+
+        putHiveInteractiveEnvProperty('slider_am_container_mb', slider_am_container_size)
+
+      except Exception as e:
+        Logger.info("Problem setting minimum values for LLAP configs in Exception code.")
+        traceback.print_exc()
+
+  """
+  Checks whether the passed-in configuration properties of a given config type have been changed.
+  Reads from services["changed-configurations"].
+  Parameters:
+     services: Configuration information for the cluster
+     config_type : Type of the configuration
+     config_names_set : Set of configuration property names to be checked for changes.
+     all_exists: If True : returns True only if all properties in 'config_names_set' are found
+                           in services["changed-configurations"]; otherwise returns False.
+                 If False : returns True if any of the properties in 'config_names_set' is found
+                           in services["changed-configurations"]; otherwise returns False.
+  """
+  def are_config_props_in_changed_configs(self, services, config_type, config_names_set, all_exists):
+    changedConfigs = services["changed-configurations"]
+    changed_config_names_set = set()
+    for changedConfig in changedConfigs:
+      if changedConfig['type'] == config_type:
+        changed_config_names_set.update([changedConfig['name']])
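+    # Illustrative sketch of the shape read above (only 'type' and 'name' are shown; real entries
+    # may carry additional keys):
+    #   services["changed-configurations"] = [
+    #     {"type": "hive-interactive-env", "name": "enable_hive_interactive"},
+    #     {"type": "hive-interactive-site", "name": "hive.llap.daemon.queue.name"}
+    #   ]
+    # With config_type = "hive-interactive-env", config_names_set = set(['enable_hive_interactive',
+    # 'llap_queue_capacity']) and all_exists = False, the intersection is non-empty and the method
+    # returns True; with all_exists = True it would return False.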
+
+    if changed_config_names_set is None:
+      return False
+    else:
+      configs_intersection = changed_config_names_set.intersection(config_names_set)
+      if all_exists:
+        if configs_intersection == config_names_set:
+          return True
+      else:
+        if len(configs_intersection) > 0 :
+          return True
+    return False
+
+  """
+  Returns all the NodeManager hosts in the cluster.
+  """
+  def get_node_manager_hosts(self, services, hosts):
+    if len(hosts["items"]) > 0:
+      node_manager_hosts = self.getHostsWithComponent("YARN", "NODEMANAGER", services, hosts)
+      assert (node_manager_hosts is not None), "Information about NODEMANAGER not found in cluster."
+      return node_manager_hosts
+
+  """
+  Returns the current LLAP queue capacity percentage value. (llap_queue_capacity)
+  """
+  def get_llap_cap_percent_slider(self, services, configurations):
+    llap_slider_cap_percentage = 0
+    if 'llap_queue_capacity' in services['configurations']['hive-interactive-env']['properties']:
+      llap_slider_cap_percentage = float(
+        services['configurations']['hive-interactive-env']['properties']['llap_queue_capacity'])
+    else:
+      Logger.error("'llap_queue_capacity' not present in services['configurations']['hive-interactive-env']['properties'].")
+    if llap_slider_cap_percentage <= 0 :
+      if 'hive-interactive-env' in configurations and \
+          'llap_queue_capacity' in configurations["hive-interactive-env"]["properties"]:
+        llap_slider_cap_percentage = float(configurations["hive-interactive-env"]["properties"]["llap_queue_capacity"])
+    assert (llap_slider_cap_percentage > 0), "'llap_queue_capacity' is set to : {0}. Should be > 0.".format(llap_slider_cap_percentage)
+    return llap_slider_cap_percentage
+
+
+  """
+  Returns current value of number of LLAP nodes in cluster (num_llap_nodes)
+  """
+  def get_num_llap_nodes(self, services):
+    if 'num_llap_nodes' in services['configurations']['hive-interactive-env']['properties']:
+      num_llap_nodes = float(
+        services['configurations']['hive-interactive-env']['properties']['num_llap_nodes'])
+      assert (num_llap_nodes > 0), "Number of LLAP nodes read : {0}. Expected value : > 0".format(
+        num_llap_nodes)
+      return num_llap_nodes
+    else:
+      raise Fail("Couldn't retrieve Hive Server interactive's 'num_llap_nodes' config.")
+
+  """
+  Gets HIVE Tez container size (hive.tez.container.size). Takes into account if it has been calculated as part of current
+  Stack Advisor invocation.
+  """
+  def get_hive_tez_container_size(self, services, configurations):
+    hive_container_size = None
+    # Check if 'hive.tez.container.size' is modified in the current SA invocation.
+    if 'hive-site' in configurations and 'hive.tez.container.size' in configurations['hive-site']['properties']:
+      hive_container_size = float(configurations['hive-site']['properties']['hive.tez.container.size'])
+      Logger.info("'hive.tez.container.size' read from configurations as : {0}".format(hive_container_size))
+
+    if not hive_container_size:
+      # Check if 'hive.tez.container.size' is input in services array.
+      if 'hive.tez.container.size' in services['configurations']['hive-site']['properties']:
+        hive_container_size = float(services['configurations']['hive-site']['properties']['hive.tez.container.size'])
+        Logger.info("'hive.tez.container.size' read from services as : {0}".format(hive_container_size))
+    if not hive_container_size:
+      raise Fail("Couldn't retrieve Hive Server 'hive.tez.container.size' config.")
+
+    assert (hive_container_size > 0), "'hive.tez.container.size' current value : {0}. Expected value : > 0".format(
+          hive_container_size)
+
+    return hive_container_size
+
+  """
+  Gets HIVE Server Interactive's 'llap_headroom_space' config. (Default value set to 6144 MB).
+  """
+  def get_llap_headroom_space(self, services, configurations):
+    llap_headroom_space = None
+    # Check if 'llap_headroom_space' is modified in current SA invocation.
+    if 'hive-interactive-env' in configurations and 'llap_headroom_space' in configurations['hive-interactive-env']['properties']:
+      llap_headroom_space = float(configurations['hive-interactive-env']['properties']['llap_headroom_space'])
+      Logger.info("'llap_headroom_space' read from configurations as : {0}".format(llap_headroom_space))
+
+    if not llap_headroom_space:
+      # Check if 'llap_headroom_space' is input in services array.
+      if 'llap_headroom_space' in services['configurations']['hive-interactive-env']['properties']:
+        llap_headroom_space = float(services['configurations']['hive-interactive-env']['properties']['llap_headroom_space'])
+        Logger.info("'llap_headroom_space' read from services as : {0}".format(llap_headroom_space))
+    if not llap_headroom_space or llap_headroom_space < 1:
+      llap_headroom_space = 6144 # 6GB
+      Logger.info("Couldn't read 'llap_headroom_space' from services or configurations. Returing default value : 6144 bytes")
+
+    return llap_headroom_space
+
+  """
+  Gets YARN's minimum container size (yarn.scheduler.minimum-allocation-mb).
+  Reads from:
+    - configurations (if changed as part of current Stack Advisor invocation (output)), and services["changed-configurations"]
+      is empty, else
+    - services['configurations'] (input).
+
+  services["changed-configurations"] would be empty if Stack Advisor call is made from Blueprints (1st invocation). Subsequent
+  Stack Advisor calls will have it non-empty. We do this because in subsequent invocations, even if Stack Advsior calculates this
+  value (configurations), it is finally not recommended, making 'input' value to survive.
+  """
+  def get_yarn_min_container_size(self, services, configurations):
+    yarn_min_container_size = None
+    # Check if services["changed-configurations"] is empty and 'yarn.scheduler.minimum-allocation-mb' is modified in current ST invocation.
+    if not services["changed-configurations"] and \
+      'yarn-site' in configurations and 'yarn.scheduler.minimum-allocation-mb' in configurations['yarn-site']['properties']:
+      yarn_min_container_size = float(configurations['yarn-site']['properties']['yarn.scheduler.minimum-allocation-mb'])
+      Logger.info("'yarn.scheduler.minimum-allocation-mb' read from configurations as : {0}".format(yarn_min_container_size))
+
+    if not yarn_min_container_size:
+      # Check if 'yarn.scheduler.minimum-allocation-mb' is input in services array.
+      if 'yarn-site' in services['configurations'] and \
+          'yarn.scheduler.minimum-allocation-mb' in services['configurations']['yarn-site']['properties']:
+        yarn_min_container_size = float(services['configurations']['yarn-site']['properties']['yarn.scheduler.minimum-allocation-mb'])
+        Logger.info("'yarn.scheduler.minimum-allocation-mb' read from services as : {0}".format(yarn_min_container_size))
+
+    if not yarn_min_container_size:
+      raise Fail("Couldn't retrieve YARN's 'yarn.scheduler.minimum-allocation-mb' config.")
+
+    assert (yarn_min_container_size > 0), "'yarn.scheduler.minimum-allocation-mb' current value : {0}. " \
+                                            "Expected value : > 0".format(yarn_min_container_size)
+    return yarn_min_container_size
+
+  """
+  Calculates the Slider App Master size based on YARN's Minimum Container Size.
+  """
+  def calculate_slider_am_size(self, yarn_min_container_size):
+    if yarn_min_container_size > 1024:
+      return 1024
+    if yarn_min_container_size >= 256 and yarn_min_container_size <= 1024:
+      return yarn_min_container_size
+    if yarn_min_container_size < 256:
+      return 256
+
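+  # Worked examples for calculate_slider_am_size (illustrative values only):
+  #   yarn_min_container_size = 2048 -> 1024, 512 -> 512, 128 -> 256.
+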
+  """
+  Gets YARN NodeManager memory in MB (yarn.nodemanager.resource.memory-mb).
+  Reads from:
+    - configurations (if changed as part of current Stack Advisor invocation (output)), and services["changed-configurations"]
+      is empty, else
+    - services['configurations'] (input).
+
+  services["changed-configurations"] would be empty is Stack Advisor call if made from Blueprints (1st invocation). Subsequent
+  Stack Advisor calls will have it non-empty. We do this because in subsequent invocations, even if Stack Advsior calculates this
+  value (configurations), it is finally not recommended, making 'input' value to survive.
+  """
+  def get_yarn_nm_mem_in_mb(self, services, configurations):
+    yarn_nm_mem_in_mb = None
+
+    # Check if services["changed-configurations"] is empty and 'yarn.nodemanager.resource.memory-mb' is modified in current ST invocation.
+    if not services["changed-configurations"] and\
+        'yarn-site' in configurations and 'yarn.nodemanager.resource.memory-mb' in configurations['yarn-site']['properties']:
+      yarn_nm_mem_in_mb = float(configurations['yarn-site']['properties']['yarn.nodemanager.resource.memory-mb'])
+      Logger.info("'yarn.nodemanager.resource.memory-mb' read from configurations as : {0}".format(yarn_nm_mem_in_mb))
+
+    if not yarn_nm_mem_in_mb:
+      # Check if 'yarn.nodemanager.resource.memory-mb' is input in services array.
+      if 'yarn-site' in services['configurations'] and \
+          'yarn.nodemanager.resource.memory-mb' in services['configurations']['yarn-site']['properties']:
+        yarn_nm_mem_in_mb = float(services['configurations']['yarn-site']['properties']['yarn.nodemanager.resource.memory-mb'])
+        Logger.info("'yarn.nodemanager.resource.memory-mb' read from services as : {0}".format(yarn_nm_mem_in_mb))
+
+    if not yarn_nm_mem_in_mb:
+      raise Fail("Couldn't retrieve YARN's 'yarn.nodemanager.resource.memory-mb' config.")
+
+    assert (yarn_nm_mem_in_mb > 0.0), "'yarn.nodemanager.resource.memory-mb' current value : {0}. " \
+                                      "Expected value : > 0".format(yarn_nm_mem_in_mb)
+
+    return yarn_nm_mem_in_mb
+
+  """
+  Determines Tez App Master container size (tez.am.resource.memory.mb) for tez_hive2/tez-site based on total cluster capacity.
+  """
+  def calculate_tez_am_container_size(self, total_cluster_capacity):
+    if total_cluster_capacity is None or not isinstance(total_cluster_capacity, long):
+      raise Fail ("Passed-in 'Total Cluster Capacity' is : '{0}'".format(total_cluster_capacity))
+
+    if total_cluster_capacity <= 0:
+      raise Fail ("Passed-in 'Total Cluster Capacity' ({0}) is Invalid.".format(total_cluster_capacity))
+    if total_cluster_capacity <= 4096:
+      return 256
+    elif total_cluster_capacity > 4096 and total_cluster_capacity <= 73728:
+      return 512
+    elif total_cluster_capacity > 73728:
+      return 1536
+
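+  # Worked examples for calculate_tez_am_container_size (illustrative values only; callers pass a
+  # 'long'): long(4096) -> 256, long(40960) -> 512, long(102400) -> 1536.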
+
+  """
+  Calculate minimum queue capacity required in order to get LLAP and HIVE2 app into running state.
+  """
+  def min_queue_perc_reqd_for_llap_and_hive_app(self, services, hosts, configurations):
+    # Get queue size if sized at 20%
+    node_manager_hosts = self.get_node_manager_hosts(services, hosts)
+    yarn_rm_mem_in_mb = self.get_yarn_nm_mem_in_mb(services, configurations)
+    total_cluster_cap = len(node_manager_hosts) * yarn_rm_mem_in_mb
+    total_queue_size_at_20_perc = 20.0 / 100 * total_cluster_cap
+
+    # Calculate based on minimum size required by containers.
+    yarn_min_container_size = self.get_yarn_min_container_size(services, configurations)
+    slider_am_size = self.calculate_slider_am_size(yarn_min_container_size)
+    hive_tez_container_size = self.get_hive_tez_container_size(services, configurations)
+    tez_am_container_size = self.calculate_tez_am_container_size(long(total_cluster_cap))
+    normalized_val = self._normalizeUp(slider_am_size, yarn_min_container_size) + self._normalizeUp\
+      (hive_tez_container_size, yarn_min_container_size) + self._normalizeUp(tez_am_container_size, yarn_min_container_size)
+
+    min_required = max(total_queue_size_at_20_perc, normalized_val)
+
+    min_required_perc = min_required * 100 / total_cluster_cap
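+    # Worked example (illustrative numbers only): with 4 NodeManagers of 10240 MB each
+    # (total_cluster_cap = 40960 MB), yarn_min_container_size = 512.0, slider_am_size = 512,
+    # hive_tez_container_size = 3072 and tez_am_container_size = 512, the normalized sum is
+    # 512 + 3072 + 512 = 4096 MB, while 20% of the cluster is 8192 MB; min_required = 8192 and
+    # min_required_perc = 20%.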
+    Logger.info("Calculated 'min_queue_perc_required_in_cluster' : {0}% and 'min_queue_cap_required_in_cluster': {1} "
+                "for LLAP and HIVE2 app using following : yarn_min_container_size : {2}, slider_am_size : {3}, hive_tez_container_size : {4}, "
+                "hive_am_container_size : {5}, total_cluster_cap : {6}, yarn_rm_mem_in_mb : {7}"
+                "".format(min_required_perc, min_required, yarn_min_container_size, slider_am_size, hive_tez_container_size,
+                          tez_am_container_size, total_cluster_cap, yarn_rm_mem_in_mb))
+    return int(math.ceil(min_required_perc))
+
+  """
+  Normalizes 'val1' down to the nearest multiple of 'val2' (returns 'val1' unchanged if it is smaller than 'val2').
+  """
+  def _normalizeDown(self, val1, val2):
+    tmp = math.floor(val1 / val2)
+    if tmp < 1.00:
+      return val1
+    return tmp * val2
+
+  """
+  Normalizes 'val1' up to the nearest multiple of 'val2'.
+  """
+  def _normalizeUp(self, val1, val2):
+    tmp = math.ceil(val1 / val2)
+    return tmp * val2
+
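+  # Worked examples (illustrative, assuming the second argument is the YARN minimum container size
+  # passed as a float, e.g. 512.0): _normalizeUp(1300, 512.0) = 1536.0,
+  # _normalizeDown(1300, 512.0) = 1024.0, and _normalizeDown(300, 512.0) = 300 (returned unchanged).
+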
+  """
+  Checks and (1). Creates 'llap' queue if only 'default' queue exist at leaf level and is consuming 100% capacity OR
+             (2). Updates 'llap' queue capacity and state, if current selected queue is 'llap', and only 2 queues exist
+                  at root level : 'default' and 'llap'.
+  """
+  def checkAndManageLlapQueue(self, services, configurations, hosts, llap_queue_name):
+    Logger.info("Determining creation/adjustment of 'capacity-scheduler' for 'llap' queue.")
+    putHiveInteractiveEnvProperty = self.putProperty(configurations, "hive-interactive-env", services)
+    putHiveInteractiveSiteProperty = self.putProperty(configurations, self.HIVE_INTERACTIVE_SITE, services)
+    putHiveInteractiveEnvPropertyAttribute = self.putPropertyAttribute(configurations, "hive-interactive-env")
+    putCapSchedProperty = self.putProperty(configurations, "capacity-scheduler", services)
+    leafQueueNames = None
+
+    capacity_scheduler_properties, received_as_key_value_pair = self.getCapacitySchedulerProperties(services)
+    if capacity_scheduler_properties:
+      leafQueueNames = self.getAllYarnLeafQueues(capacity_scheduler_properties)
+      # Get the llap Cluster percentage used for 'llap' Queue creation
+      if 'llap_queue_capacity' in services['configurations']['hive-interactive-env']['properties']:
+        llap_slider_cap_percentage = int(
+          services['configurations']['hive-interactive-env']['properties']['llap_queue_capacity'])
+        min_reqd_queue_cap_perc = self.min_queue_perc_reqd_for_llap_and_hive_app(services, hosts, configurations)
+        if min_reqd_queue_cap_perc > 100:
+          Logger.info("Received 'Minimum Required LLAP queue capacity' : {0}% (out of bounds), adjusting it to : 100%".format(min_reqd_queue_cap_perc))
+          min_reqd_queue_cap_perc = 100
+
+        # Adjust 'llap' queue capacity slider value to be minimum required if out of expected bounds.
+        if llap_slider_cap_percentage <= 0 or llap_slider_cap_percentage > 100:
+          Logger.info("Adjusting HIVE 'llap_queue_capacity' from {0}% (invalid size) to {1}%".format(llap_slider_cap_percentage, min_reqd_queue_cap_perc))
+          putHiveInteractiveEnvProperty('llap_queue_capacity', min_reqd_queue_cap_perc)
+          llap_slider_cap_percentage = min_reqd_queue_cap_perc
+      else:
+        Logger.error("Problem retrieving LLAP Queue Capacity. Skipping creating {0} queue".format(llap_queue_name))
+        return
+
+      cap_sched_config_keys = capacity_scheduler_properties.keys()
+
+      yarn_default_queue_capacity = -1
+      if 'yarn.scheduler.capacity.root.default.capacity' in cap_sched_config_keys:
+        yarn_default_queue_capacity = capacity_scheduler_properties.get('yarn.scheduler.capacity.root.default.capacity')
+
+      # Get 'llap' queue state
+      currLlapQueueState = ''
+      if 'yarn.scheduler.capacity.root.'+llap_queue_name+'.state' in cap_sched_config_keys:
+        currLlapQueueState = capacity_scheduler_properties.get('yarn.scheduler.capacity.root.'+llap_queue_name+'.state')
+
+      # Get 'llap' queue capacity
+      currLlapQueueCap = -1
+      if 'yarn.scheduler.capacity.root.'+llap_queue_name+'.capacity' in cap_sched_config_keys:
+        currLlapQueueCap = int(float(capacity_scheduler_properties.get('yarn.scheduler.capacity.root.'+llap_queue_name+'.capacity')))
+
+      if self.HIVE_INTERACTIVE_SITE in services['configurations'] and \
+          'hive.llap.daemon.queue.name' in services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']:
+        llap_daemon_selected_queue_name =  services['configurations'][self.HIVE_INTERACTIVE_SITE]['properties']['hive.llap.daemon.queue.name']
+      else:
+        Logger.error("Couldn't retrive 'hive.llap.daemon.queue.name' property. Skipping adjusting queues.")
+        return
+      updated_cap_sched_configs_str = ''
+
+      enabled_hive_int_in_changed_configs = self.are_config_props_in_changed_configs(services, "hive-interactive-env",
+                                                                                   set(['enable_hive_interactive']), False)
+      """
+      We create OR "modify 'llap' queue 'state and/or capacity' " based on below conditions:
+       - if only 1 queue exists at root level and is 'default' queue and has 100% cap -> Create 'llap' queue,  OR
+       - if 2 queues exists at root level ('llap' and 'default') :
+           - Queue selected is 'llap' and state is STOPPED -> Modify 'llap' queue state to RUNNING, adjust capacity, OR
+           - Queue selected is 'llap', state is RUNNING and 'llap_queue_capacity' prop != 'llap' queue current running capacity ->
+              Modify 'llap' queue capacity to 'llap_queue_capacity'
+      """
+      if 'default' in leafQueueNames and \
+        ((len(leafQueueNames) == 1 and int(yarn_default_queue_capacity) == 100) or \
+        ((len(leafQueueNames) == 2 and llap_queue_name in leafQueueNames) and \
+           ((currLlapQueueState == 'STOPPED' and enabled_hive_int_in_changed_configs) or (currLlapQueueState == 'RUNNING' and currLlapQueueCap != llap_slider_cap_percentage)))):
+        adjusted_default_queue_cap = str(100 - llap_slider_cap_percentage)
+
+        hive_user = '*'  # Open to all
+        if 'hive_user' in services['configurations']['hive-env']['properties']:
+          hive_user = services['configurations']['hive-env']['properties']['hive_user']
+
+        llap_slider_cap_percentage = str(llap_slider_cap_percenta

<TRUNCATED>

[06/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/widgets.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/widgets.json
new file mode 100755
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.1/kerberos.json
new file mode 100755
index 0000000..03198dc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/kerberos.json
@@ -0,0 +1,47 @@
+{
+  "properties": {
+    "realm": "${kerberos-env/realm}",
+    "keytab_dir": "/etc/security/keytabs"
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type" : "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    },
+    {
+      "name": "smokeuser",
+      "principal": {
+        "value": "${cluster-env/smokeuser}-${cluster_name}@${realm}",
+        "type" : "user",
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username" : "${cluster-env/smokeuser}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "owner": {
+          "name": "${cluster-env/smokeuser}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "cluster-env/smokeuser_keytab"
+      }
+    }
+  ]
+}
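
Not part of the diff: the "${...}" tokens in these identity definitions (for example "${cluster-env/smokeuser}-${cluster_name}@${realm}") are placeholders that Ambari fills in from configuration when it generates principals and keytabs. A rough substitution sketch, assuming a flat dict of already-resolved values (the helper name resolve_placeholders and the sample values are invented for illustration):

import re

def resolve_placeholders(template, values):
    # Replace each ${key}; keys may contain '/' and '-',
    # e.g. "cluster-env/smokeuser" or "kerberos-env/realm".
    return re.sub(r"\$\{([^}]+)\}", lambda m: str(values[m.group(1)]), template)

values = {
    "cluster-env/smokeuser": "ambari-qa",   # illustrative values
    "cluster_name": "bi_cluster",
    "realm": "EXAMPLE.COM",
}
print(resolve_placeholders("${cluster-env/smokeuser}-${cluster_name}@${realm}", values))
# -> ambari-qa-bi_cluster@EXAMPLE.COM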

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/metainfo.xml
new file mode 100755
index 0000000..a40ff2a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+	  <active>false</active>
+    </versions>
+    <extends>4.0</extends>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml
new file mode 100755
index 0000000..dcfab53
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <mainrepoid>IOP-4.1</mainrepoid>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/6/x86_64/4.1.x/GA/4.1.0.0/</baseurl>
+      <repoid>IOP-4.1</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/6/x86_64/1.1/</baseurl>
+      <repoid>IOP-UTILS-1.1</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="redhat7">
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/7/x86_64/4.1.x/GA/4.1.0.0/</baseurl>
+      <repoid>IOP-4.1</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/7/x86_64/1.1/</baseurl>
+      <repoid>IOP-UTILS-1.1</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>
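
Not part of the diff: the per-OS <repo> entries in repoinfo.xml are plain XML, so they are easy to inspect offline. A minimal listing sketch with the Python standard library (the file path is illustrative):

import xml.etree.ElementTree as ET

tree = ET.parse("repoinfo.xml")  # e.g. the BigInsights/4.1 file added above
for os_elem in tree.getroot().findall("os"):
    family = os_elem.get("family")
    for repo in os_elem.findall("repo"):
        print(family, repo.findtext("repoid"), repo.findtext("baseurl"))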

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_RH6
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_RH6 b/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_RH6
new file mode 100755
index 0000000..a7309fc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_RH6
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <mainrepoid>IOP-4.1</mainrepoid>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/6/x86_64/4.1.x/GA/4.1.0.0/</baseurl>
+      <repoid>IOP-4.1</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/6/x86_64/1.1/</baseurl>
+      <repoid>IOP-UTILS-1.1</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_RH7
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_RH7 b/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_RH7
new file mode 100755
index 0000000..e861312
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_RH7
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <mainrepoid>IOP-4.1</mainrepoid>
+  <os family="redhat7">
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/7/x86_64/4.1.x/GA/4.1.0.0/</baseurl>
+      <repoid>IOP-4.1</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/7/x86_64/1.1/</baseurl>
+      <repoid>IOP-UTILS-1.1</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_SLES
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_SLES b/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_SLES
new file mode 100755
index 0000000..46ac37c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.amd64_SLES
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <mainrepoid>IOP-4.1</mainrepoid>
+  <os family="suse11">
+    <repo>
+      <baseurl>https://ibm-open-platform.ibm.com/repos/IOP/sles/11/x86_64/4.1.x/GA/4.1.0.0/</baseurl>
+      <repoid>IOP-4.1</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>https://ibm-open-platform.ibm.com/repos/IOP-UTILS/sles/11/x86_64/1.1/</baseurl>
+      <repoid>IOP-UTILS-1.1</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.ppc64le_RH7
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.ppc64le_RH7 b/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.ppc64le_RH7
new file mode 100755
index 0000000..a45b9ab
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.ppc64le_RH7
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <mainrepoid>IOP-4.1</mainrepoid>
+  <os family="redhat7">
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/rhel/7/ppc64le/4.1.x/GA/4.1.0.0/</baseurl>
+      <repoid>IOP-4.1</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/7/ppc64le/1.1/</baseurl>
+      <repoid>IOP-UTILS-1.1</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.s390x_RH7
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.s390x_RH7 b/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.s390x_RH7
new file mode 100755
index 0000000..14aa846
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/repos/repoinfo.xml.s390x_RH7
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <mainrepoid>IOP-4.1</mainrepoid>
+  <os family="redhat7">
+    <repo>
+      <baseurl>https://ibm-open-platform.ibm.com/repos/IOP/rhel/7/s390x/4.1.x/GA/4.1.0.0/</baseurl>
+      <repoid>IOP-4.1</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>https://ibm-open-platform.ibm.com/repos/IOP-UTILS/rhel/7/s390x/1.1/</baseurl>
+      <repoid>IOP-UTILS-1.1</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.1/role_command_order.json
new file mode 100755
index 0000000..a70e4f7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/role_command_order.json
@@ -0,0 +1,22 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "SOLR-START": ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "SECONDARY_NAMENODE-START"],
+    "SOLR_SERVICE_CHECK-SERVICE_CHECK": ["SOLR-START", "HDFS_SERVICE_CHECK-SERVICE_CHECK"],
+    "KAFKA_BROKER-START" : ["ZOOKEEPER_SERVER-START"],
+    "KAFKA_SERVICE_CHECK-SERVICE_CHECK": ["KAFKA_BROKER-START"],
+    "BIGSQL_HEAD-INSTALL" : ["HIVE_CLIENT-INSTALL", "HDFS_CLIENT-INSTALL", "HBASE_CLIENT-INSTALL", "SQOOP-INSTALL"],
+    "BIGSQL_WORKER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HDFS_CLIENT-INSTALL", "HBASE_CLIENT-INSTALL", "SQOOP-INSTALL", "SLIDER-INSTALL"],
+    "BIGSQL_HEAD-START": ["HBASE_REGIONSERVER-START", "OOZIE_SERVER-START", "WEBHCAT_SERVER-START", "HIVE_METASTORE-START", "HIVE_SERVER-START"],
+    "BIGSQL_SECONDARY-START": ["BIGSQL_HEAD-START"],
+    "BIGSQL_WORKER-START": ["BIGSQL_HEAD-START", "BIGSQL_SECONDARY-START"],
+    "BIGSQL_SERVICE_CHECK-SERVICE_CHECK": ["BIGSQL_WORKER-START"]
+  },
+  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+  "optional_no_glusterfs": {
+    "METRICS_COLLECTOR-START": ["NAMENODE-START", "DATANODE-START"],
+    "AMBARI_METRICS_SERVICE_CHECK-SERVICE_CHECK": ["METRICS_COLLECTOR-START", "HDFS_SERVICE_CHECK-SERVICE_CHECK"]
+  }
+}
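
Not part of the diff: each key in role_command_order.json names a blocked role-command, and its list names the role-commands that must finish first. A small sketch of turning such a map into a valid execution order with Python's standard graphlib (a trimmed subset of the dependencies above; not how Ambari itself schedules commands):

from graphlib import TopologicalSorter

# blocked command -> commands that must complete before it
deps = {
    "SOLR-START": ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START"],
    "SOLR_SERVICE_CHECK-SERVICE_CHECK": ["SOLR-START", "HDFS_SERVICE_CHECK-SERVICE_CHECK"],
    "KAFKA_BROKER-START": ["ZOOKEEPER_SERVER-START"],
    "KAFKA_SERVICE_CHECK-SERVICE_CHECK": ["KAFKA_BROKER-START"],
}

# static_order() yields every blocker before the command it blocks
print(list(TopologicalSorter(deps).static_order()))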

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/AMBARI_METRICS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/AMBARI_METRICS/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/AMBARI_METRICS/metainfo.xml
new file mode 100755
index 0000000..03f1dce
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/AMBARI_METRICS/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>AMBARI_METRICS</name>
+      <version>0.1.0</version>
+    </service>
+  </services>
+</metainfo>
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/FLUME/configuration/flume-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/FLUME/configuration/flume-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/FLUME/configuration/flume-env.xml
new file mode 100755
index 0000000..3747825
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/FLUME/configuration/flume-env.xml
@@ -0,0 +1,70 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>	
+    <name>flume_run_dir</name>	
+    <value>/var/run/flume</value>	
+    <description>Location to save information about running agents</description>	
+  </property>
+
+  <!-- flume-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the Jinja template for the flume-env.sh file</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
+# during Flume startup.
+
+# Environment variables can be set here.
+
+export JAVA_HOME={{java_home}}
+
+# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
+# export JAVA_OPTS="-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote"
+
+# Note that the Flume conf directory is always included in the classpath.
+# Add flume sink to classpath
+if [ -e "/usr/lib/flume/lib/ambari-metrics-flume-sink.jar" ]; then
+  export FLUME_CLASSPATH=$FLUME_CLASSPATH:/usr/lib/flume/lib/ambari-metrics-flume-sink.jar
+fi
+
+export HIVE_HOME={{flume_hive_home}}
+export HCAT_HOME={{flume_hcat_home}}
+    </value>
+  </property>
+</configuration>
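
Not part of the diff: the "content" property above is a Jinja template that Ambari renders into flume-env.sh on each agent host. A minimal rendering sketch using the jinja2 library, with invented values for the three template variables:

from jinja2 import Template

flume_env_template = """export JAVA_HOME={{java_home}}
export HIVE_HOME={{flume_hive_home}}
export HCAT_HOME={{flume_hcat_home}}"""

print(Template(flume_env_template).render(
    java_home="/usr/jdk64/jdk1.8.0",                  # illustrative
    flume_hive_home="/usr/iop/current/hive-client",   # illustrative
    flume_hcat_home="/usr/iop/current/hive-webhcat",  # illustrative
))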

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/FLUME/metainfo.xml
new file mode 100755
index 0000000..75e5ead
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/FLUME/metainfo.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FLUME</name>
+      <version>1.5.2</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>flume_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HBASE/metainfo.xml
new file mode 100755
index 0000000..80aa3c2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HBASE/metainfo.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <version>1.1.1</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hbase_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+      <themes>
+        <theme>
+           <fileName>theme.json</fileName>
+           <default>true</default>
+        </theme>
+      </themes>
+      
+    </service>
+  </services>
+</metainfo>
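
Not part of the diff: package names such as hbase_4_1_* in the <osSpecific> block above are wildcard patterns that get matched against the concrete package names provided by the repository. An illustrative match with Python's fnmatch (the candidate names are invented):

from fnmatch import fnmatch

candidates = ["hbase_4_1_0_0", "hbase_4_1_0_0-regionserver", "hbase_4_2_0_0"]
print([name for name in candidates if fnmatch(name, "hbase_4_1_*")])
# -> ['hbase_4_1_0_0', 'hbase_4_1_0_0-regionserver']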

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HBASE/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HBASE/themes/theme.json b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HBASE/themes/theme.json
new file mode 100755
index 0000000..1808809
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HBASE/themes/theme.json
@@ -0,0 +1,367 @@
+{
+  "name": "default",
+  "description": "Default theme for HBASE service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "3",
+              "tab-rows": "3",
+              "sections": [
+                {
+                  "name": "section-hbase-memory",
+                  "display-name": "Server",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-memory-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-hbase-memory-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-client",
+                  "display-name": "Client",
+                  "row-index": "0",
+                  "column-index": "2",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-client-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-disk",
+                  "display-name": "Disk",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "3",
+                  "section-columns": "3",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-disk-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-hbase-disk-col2",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "subsection-hbase-disk-col3",
+                      "row-index": "0",
+                      "column-index": "2",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-timeouts",
+                  "display-name": "Timeouts",
+                  "row-index": "2",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-timeouts-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-hbase-security",
+                  "display-name": "Security",
+                  "row-index": "2",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-hbase-security-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hbase-env/hbase_master_heapsize",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-env/hbase_regionserver_heapsize",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-site/hfile.block.cache.size",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-site/hbase.regionserver.global.memstore.size",
+          "subsection-name": "subsection-hbase-memory-col1"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.memstore.flush.size",
+          "subsection-name": "subsection-hbase-memory-col2"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.memstore.block.multiplier",
+          "subsection-name": "subsection-hbase-memory-col2"
+        },
+        {
+          "config": "hbase-site/hbase.regionserver.handler.count",
+          "subsection-name": "subsection-hbase-memory-col2"
+        },
+        {
+          "config": "hbase-site/hbase.client.retries.number",
+          "subsection-name": "subsection-hbase-client-col1"
+        },
+        {
+          "config": "hbase-site/hbase.client.keyvalue.maxsize",
+          "subsection-name": "subsection-hbase-client-col1"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.max.filesize",
+          "subsection-name": "subsection-hbase-disk-col1"
+        },
+        {
+          "config": "hbase-site/hbase.hregion.majorcompaction",
+          "subsection-name": "subsection-hbase-disk-col2"
+        },
+        {
+          "config": "hbase-site/hbase.hstore.compaction.max",
+          "subsection-name": "subsection-hbase-disk-col3"
+        },
+        {
+          "config": "hbase-site/zookeeper.session.timeout",
+          "subsection-name": "subsection-hbase-timeouts-col1"
+        },
+        {
+          "config": "hbase-site/hbase.rpc.timeout",
+          "subsection-name": "subsection-hbase-timeouts-col1"
+        },
+        {
+          "config": "hbase-site/hbase.security.authentication",
+          "subsection-name": "subsection-hbase-security-col1"
+        },
+        {
+          "config": "hbase-site/hbase.security.authorization",
+          "subsection-name": "subsection-hbase-security-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hbase-env/hbase_master_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-env/hbase_regionserver_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hfile.block.cache.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.regionserver.global.memstore.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.memstore.flush.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.memstore.block.multiplier",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hbase-site/hbase.regionserver.handler.count",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.client.retries.number",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.client.keyvalue.maxsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.max.filesize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hregion.majorcompaction",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "days,hours"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.hstore.compaction.max",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hbase-site/zookeeper.session.timeout",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "minutes,seconds"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.rpc.timeout",
+        "widget": {
+          "type": "time-interval-spinner",
+          "units": [
+            {
+              "unit-name": "minutes,seconds"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hbase-site/hbase.security.authentication",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hbase-site/hbase.security.authorization",
+        "widget": {
+          "type": "toggle"
+        }
+      }
+    ]
+  }
+}
+
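
Not part of the diff: a theme file like the one above only renders sensibly if every placement entry points at a declared subsection and every placed config also has a widget definition. A small self-check sketch, assuming the JSON has already been loaded into a dict named theme (the check is illustrative, not an Ambari validator):

def check_theme(theme):
    cfg = theme["configuration"]
    subsections = {
        sub["name"]
        for layout in cfg["layouts"]
        for tab in layout["tabs"]
        for section in tab["layout"]["sections"]
        for sub in section["subsections"]
    }
    placed = {item["config"] for item in cfg["placement"]["configs"]}
    widgets = {item["config"] for item in cfg["widgets"]}
    unknown = {
        item["config"] for item in cfg["placement"]["configs"]
        if item["subsection-name"] not in subsections
    }
    return {
        "placed_without_widget": sorted(placed - widgets),
        "widget_without_placement": sorted(widgets - placed),
        "unknown_subsection": sorted(unknown),
    }

# usage: import json; print(check_theme(json.load(open("theme.json"))))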

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hadoop-env.xml
new file mode 100755
index 0000000..d92e473
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,166 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the Jinja template for the hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hadoop Configuration Directory
+# TODO: if this env var is already set, it can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+
+{% if java_version &lt; 8 %}
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+{% else %}
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:MetaspaceSize={{namenode_opt_permsize}} -XX:MaxMetaspaceSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_DATANODE_OPTS="-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxMetaspaceSize=512m $HADOOP_CLIENT_OPTS"
+{% endif %}
+
+export HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS
+
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_NFS3_OPTS="-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+#if [ -d "/usr/lib/tez" ]; then
+#  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+#fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/iop/current/hadoop-client/lib/native/Linux-amd64-64
+
+#Hadoop logging options. Modify and uncomment to change logging level
+#export HADOOP_ROOT_LOGGER={{hadoop_root_logger}}
+    </value>
+  </property>
+  <property>
+    <name>nfsgateway_heapsize</name>
+    <display-name>NFSGateway maximum Java heap size</display-name>
+    <value>1024</value>
+    <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+  </property>
+</configuration>
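
Not part of the diff: the {% if java_version < 8 %} branch in the hadoop-env template above is what lets one template emit -XX:PermSize flags on JDK 7 and -XX:MetaspaceSize flags on JDK 8+. A tiny isolation of that branch with the jinja2 library (values invented for illustration):

from jinja2 import Template

snippet = (
    "{% if java_version < 8 %}"
    "-XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}}"
    "{% else %}"
    "-XX:MetaspaceSize={{namenode_opt_permsize}} -XX:MaxMetaspaceSize={{namenode_opt_maxpermsize}}"
    "{% endif %}"
)

for jdk in (7, 8):
    print(jdk, Template(snippet).render(java_version=jdk,
                                        namenode_opt_permsize="128m",
                                        namenode_opt_maxpermsize="256m"))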

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hdfs-site.xml
new file mode 100755
index 0000000..1a40673
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>nfs.file.dump.dir</name>
+    <value>/tmp/.hdfs-nfs</value>
+    <display-name>NFSGateway dump directory</display-name>
+    <description>
+      This directory is used to temporarily save out-of-order writes before
+      writing to HDFS. For each file, the out-of-order writes are dumped after
+      they accumulate to exceed a certain threshold (e.g., 1MB) in memory.
+      Make sure the directory has enough space.
+    </description>
+    <value-attributes>
+        <type>directory</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>nfs.exports.allowed.hosts</name>
+    <value>* rw</value>
+    <display-name>Allowed hosts</display-name>
+  </property>
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/metainfo.xml
new file mode 100755
index 0000000..4c00b4f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/metainfo.xml
@@ -0,0 +1,127 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <version>2.7.1</version>
+
+	  <components>
+		<component>
+			<name>NFS_GATEWAY</name>
+			<displayName>NFSGateway</displayName>
+			<cardinality>0+</cardinality>
+			<versionAdvertised>true</versionAdvertised>
+			<category>SLAVE</category>
+			<commandScript>
+				<script>scripts/nfsgateway.py</script>
+				<scriptType>PYTHON</scriptType>
+				<timeout>1200</timeout>
+			</commandScript>
+			<dependencies>
+				<dependency>
+					<name>HDFS/HDFS_CLIENT</name>
+					<scope>host</scope>
+					<auto-deploy>
+						<enabled>true</enabled>
+					</auto-deploy>
+				</dependency>
+			</dependencies>
+		</component>
+	  </components>
+      
+      <osSpecifics> 
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>rpcbind</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_4_1_*</name>
+            </package>
+            <package>
+              <name>hadoop-lzo</name>
+              <!--name>hadoop_4_1_*-lzo</name-->
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>redhat7,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>lzo</name>
+            </package>
+            <package>
+              <name>hadoop-lzo-native</name>
+            </package>
+            <package>
+              <name>hadoop_4_1_*-libhdfs</name>
+            </package>
+            <package>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>libsnappy1</name>
+            </package>
+            <package>
+              <name>libsnappy-dev</name>
+            </package>
+            <package>
+              <name>liblzo2-2</name>
+            </package>
+            <package>
+              <name>hadoop-hdfs</name>
+            </package>
+            <package>
+              <name>libhdfs0</name>
+            </package>
+            <package>
+              <name>libhdfs0-dev</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+      <themes>
+        <theme>
+           <fileName>theme.json</fileName>
+           <default>true</default>
+        </theme>
+      </themes>
+      
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/themes/theme.json b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/themes/theme.json
new file mode 100755
index 0000000..6f2b797
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/themes/theme.json
@@ -0,0 +1,179 @@
+{
+  "name": "default",
+  "description": "Default theme for HDFS service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-columns": "2",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-namenode",
+                  "display-name": "NameNode",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-namenode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "section-datanode",
+                  "display-name": "DataNode",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-datanode-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hdfs-site/dfs.namenode.name.dir",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hadoop-env/namenode_heapsize",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.handler.count",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+          "subsection-name": "subsection-namenode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.data.dir",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hadoop-env/dtnode_heapsize",
+          "subsection-name": "subsection-datanode-col1"
+        },
+        {
+          "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+          "subsection-name": "subsection-datanode-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hdfs-site/dfs.namenode.name.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.safemode.threshold-pct",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "percent"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.namenode.handler.count",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hadoop-env/namenode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.failed.volumes.tolerated",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.data.dir",
+        "widget": {
+          "type": "directories"
+        }
+      },
+      {
+        "config": "hadoop-env/dtnode_heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "GB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hdfs-site/dfs.datanode.max.transfer.threads",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
+
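
For orientation only (this note and sketch are not part of the commit): the theme above wires each HDFS config key into a subsection declared under "layouts". A minimal Python sketch, assuming the file is saved locally as theme.json (a placeholder path), that cross-checks the placement entries against the declared subsection names:

# Illustrative sketch, not part of this commit; "theme.json" is a placeholder path.
import json

with open("theme.json") as fh:
    cfg = json.load(fh)["configuration"]

# Collect every subsection name declared in the layouts.
declared = {
    sub["name"]
    for layout in cfg["layouts"]
    for tab in layout["tabs"]
    for section in tab["layout"]["sections"]
    for sub in section["subsections"]
}

# Flag any placement entry that points at a subsection that was never declared.
for entry in cfg["placement"]["configs"]:
    if entry["subsection-name"] not in declared:
        print("placement refers to an undeclared subsection:", entry["subsection-name"])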

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/widgets.json b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/widgets.json
new file mode 100755
index 0000000..85082d0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HDFS/widgets.json
@@ -0,0 +1,644 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_hdfs_dashboard",
+      "display_name": "Standard HDFS Dashboard",
+      "section_name": "HDFS_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "NameNode GC count",
+          "description": "Count of total garbage collections and count of major type garbage collections of the JVM.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcCount",
+              "metric_path": "metrics/jvm/gcCount",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
+              "metric_path": "metrics/jvm/GcCountConcurrentMarkSweep",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC total count",
+              "value": "${jvm.JvmMetrics.GcCount}"
+            },
+            {
+              "name": "GC count of type major collection",
+              "value": "${jvm.JvmMetrics.GcCountConcurrentMarkSweep}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode GC time",
+          "description": "Total time taken by major type garbage collections in milliseconds.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
+              "metric_path": "metrics/jvm/GcTimeMillisConcurrentMarkSweep",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "GC time in major collection",
+              "value": "${jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NN Connection Load",
+          "description": "Number of open RPC connections being managed by NameNode.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.NumOpenConnections",
+              "metric_path": "metrics/rpc/NumOpenConnections",
+              "category": "",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "Open Connections",
+              "value": "${rpc.rpc.NumOpenConnections}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Heap",
+          "description": "Heap memory committed and heap memory used over time.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "jvm.JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "jvm.JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "JVM heap committed",
+              "value": "${jvm.JvmMetrics.MemHeapCommittedM}"
+            },
+            {
+              "name": "JVM heap used",
+              "value": "${jvm.JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "NameNode Host Load",
+          "description": "Percentage of CPU and memory resources being consumed on the NameNode host.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system",
+              "metric_path": "metrics/cpu/cpu_system",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_user",
+              "metric_path": "metrics/cpu/cpu_user",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_nice",
+              "metric_path": "metrics/cpu/cpu_nice",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_idle",
+              "metric_path": "metrics/cpu/cpu_idle",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"
+            },
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total - mem_free - mem_cached)/mem_total) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        },
+        {
+          "widget_name": "NameNode RPC",
+          "description": "Compares the average time an RPC request spends in the queue with the average time spent processing it.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "rpc.rpc.RpcQueueTimeAvgTime",
+              "metric_path": "metrics/rpc/RpcQueueTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            },
+            {
+              "name": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "metric_path": "metrics/rpc/RpcProcessingTime_avg_time",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "RPC Queue Wait time",
+              "value": "${rpc.rpc.RpcQueueTimeAvgTime}"
+            },
+            {
+              "name": "RPC Processing time",
+              "value": "${rpc.rpc.RpcProcessingTimeAvgTime}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "ms"
+          }
+        },
+        {
+          "widget_name": "NameNode Operations",
+          "description": "Total number of file operations over time.",
+          "widget_type": "GRAPH",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.namenode.TotalFileOps",
+              "metric_path": "metrics/dfs/namenode/TotalFileOps",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE",
+              "host_component_criteria": "host_components/metrics/dfs/FSNamesystem/HAState=active"
+            }
+          ],
+          "values": [
+            {
+              "name": "NameNode File Operations",
+              "value": "${dfs.namenode.TotalFileOps}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Failed disk volumes",
+          "description": "Number of failed disk volumes across all DataNodes. This is indicative of poor HDFS health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "dfs.datanode.VolumeFailures._sum",
+              "metric_path": "metrics/dfs/datanode/VolumeFailures._sum",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Failed disk volumes",
+              "value": "${dfs.datanode.VolumeFailures._sum}"
+            }
+          ],
+          "properties": {
+            "display_unit": ""
+          }
+        },
+        {
+          "widget_name": "Corrupted Blocks",
+          "description": "Number of data blocks that have become corrupted or missing. This is indicative of poor HDFS health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/CorruptBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Corrupted Blocks",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "Under Replicated Blocks",
+          "description": "Number of file blocks that do not meet the replication factor criteria. This is indicative of poor HDFS health.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+              "metric_path": "metrics/dfs/FSNamesystem/UnderReplicatedBlocks",
+              "service_name": "HDFS",
+              "component_name": "NAMENODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Under Replicated Blocks",
+              "value": "${Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0",
+            "error_threshold": "50"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Used",
+          "description": "DataNode JVM Heap Memory Used",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Used",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "HDFS Space Utilization",
+          "description": "Percentage of available space used in the DFS.",
+          "widget_type": "GAUGE",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.DfsUsed",
+              "metric_path": "metrics/dfs/datanode/DfsUsed",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Space Utilization",
+              "value": "${FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.DfsUsed/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity}"
+            }
+          ],
+          "properties": {
+            "warning_threshold": "0.75",
+            "error_threshold": "0.9"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_hdfs_heatmap",
+      "section_name": "HDFS_HEATMAPS",
+      "display_name": "HDFS Heatmaps",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "HDFS Bytes Read",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead",
+              "metric_path": "metrics/dfs/datanode/bytes_read",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Read",
+              "value": "${dfs.datanode.BytesRead}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "HDFS Bytes Written",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesWritten",
+              "metric_path": "metrics/dfs/datanode/bytes_written",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Bytes Written",
+              "value": "${dfs.datanode.BytesWritten}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "1024"
+          }
+        },
+        {
+          "widget_name": "DataNode Garbage Collection Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Garbage Collection Time",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Used",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode JVM Heap Memory Committed",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM",
+              "metric_path": "metrics/jvm/memHeapCommittedM",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode JVM Heap Memory Committed",
+              "value": "${Hadoop:service=DataNode,name=JvmMetrics.MemHeapCommittedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "DataNode Process Disk I/O Utilization",
+          "default_section_name": "HDFS_HEATMAPS",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.BytesRead",
+              "metric_path": "metrics/dfs/datanode/bytes_read",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.BytesWritten",
+              "metric_path": "metrics/dfs/datanode/bytes_written",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalReadTime",
+              "metric_path": "metrics/dfs/datanode/total_read_time",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.TotalWriteTime",
+              "metric_path": "metrics/dfs/datanode/total_write_time",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Process Disk I/O Utilization",
+              "value": "${((dfs.datanode.BytesRead/dfs.datanode.TotalReadTime)+(dfs.datanode.BytesWritten/dfs.datanode.TotalWriteTime))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "DataNode Process Network I/O Utilization",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "dfs.datanode.RemoteBytesRead",
+              "metric_path": "metrics/dfs/datanode/remote_bytes_read",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.ReadsFromRemoteClient",
+              "metric_path": "metrics/dfs/datanode/reads_from_remote_client",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.RemoteBytesWritten",
+              "metric_path": "metrics/dfs/datanode/remote_bytes_written",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "dfs.datanode.WritesFromRemoteClient",
+              "metric_path": "metrics/dfs/datanode/writes_from_remote_client",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "DataNode Process Network I/O Utilization",
+              "value": "${((dfs.datanode.RemoteBytesRead/dfs.datanode.ReadsFromRemoteClient)+(dfs.datanode.RemoteBytesWritten/dfs.datanode.WritesFromRemoteClient))*50}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "HDFS Space Utilization",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.DfsUsed",
+              "metric_path": "metrics/dfs/datanode/DfsUsed",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            },
+            {
+              "name": "FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity",
+              "metric_path": "metrics/dfs/datanode/Capacity",
+              "service_name": "HDFS",
+              "component_name": "DATANODE"
+            }
+          ],
+          "values": [
+            {
+              "name": "HDFS Space Utilization",
+              "value": "${(FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.DfsUsed/FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
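
For orientation only (this note and sketch are not part of the commit): each widget's "value" is a ${...} arithmetic expression over the listed metric names. A rough Python sketch, with invented sample values, showing how the NameNode Host Load CPU expression resolves to a percentage:

# Illustrative sketch, not part of this commit; the sample metric values are invented.
samples = {
    "cpu_system": 5.0, "cpu_user": 20.0, "cpu_nice": 0.0,
    "cpu_idle": 70.0, "cpu_wio": 5.0,
}

expr = "${((cpu_system + cpu_user + cpu_nice)/(cpu_system + cpu_user + cpu_nice + cpu_idle + cpu_wio)) * 100}"

body = expr[2:-1]                      # strip the ${ ... } wrapper
for name, value in samples.items():
    body = body.replace(name, repr(value))   # substitute sampled numbers for metric names

print(round(eval(body), 1))            # -> 25.0 (percent CPU in use for these samples)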


http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/templates/kdc_conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/templates/kdc_conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/templates/kdc_conf.j2
new file mode 100755
index 0000000..f78adc7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/templates/kdc_conf.j2
@@ -0,0 +1,30 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[kdcdefaults]
+  kdc_ports = {{kdcdefaults_kdc_ports}}
+  kdc_tcp_ports = {{kdcdefaults_kdc_tcp_ports}}
+
+[realms]
+  {{realm}} = {
+    acl_file = {{kadm5_acl_path}}
+    dict_file = /usr/share/dict/words
+    admin_keytab = {{kadm5_acl_dir}}/kadm5.keytab
+    supported_enctypes = {{encryption_types}}
+  }
+
+{# Additional realm declarations should be placed below #}
\ No newline at end of file
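
For orientation only (this note and sketch are not part of the commit): a minimal sketch of rendering the kdc.conf template above with the jinja2 library; every variable value below is a placeholder, not a value taken from the stack.

# Illustrative sketch, not part of this commit; all variable values are placeholders.
from jinja2 import Template

with open("kdc_conf.j2") as fh:
    rendered = Template(fh.read()).render(
        kdcdefaults_kdc_ports="88",
        kdcdefaults_kdc_tcp_ports="88",
        realm="EXAMPLE.COM",
        kadm5_acl_path="/var/kerberos/krb5kdc/kadm5.acl",
        kadm5_acl_dir="/var/kerberos/krb5kdc",
        encryption_types="aes256-cts-hmac-sha1-96",
    )

print(rendered)   # the [kdcdefaults] and [realms] sections with values filled in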

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/templates/krb5_conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/templates/krb5_conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/templates/krb5_conf.j2
new file mode 100755
index 0000000..733d38a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KERBEROS/package/templates/krb5_conf.j2
@@ -0,0 +1,55 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[libdefaults]
+  renew_lifetime = 7d
+  forwardable = true
+  default_realm = {{realm|upper()}}
+  ticket_lifetime = 24h
+  dns_lookup_realm = false
+  dns_lookup_kdc = false
+  #default_tgs_enctypes = {{encryption_types}}
+  #default_tkt_enctypes = {{encryption_types}}
+
+{% if domains %}
+[domain_realm]
+{% for domain in domains.split(',') %}
+  {{domain}} = {{realm|upper()}}
+{% endfor %}
+{% endif %}
+
+[logging]
+  default = FILE:/var/log/krb5kdc.log
+  admin_server = FILE:/var/log/kadmind.log
+  kdc = FILE:/var/log/krb5kdc.log
+
+[realms]
+  {{realm}} = {
+{%- if kdc_hosts|length > 0 -%}
+{%- set kdc_host_list = kdc_hosts.split(',')  -%}
+{%- if kdc_host_list and kdc_host_list|length > 0 %}
+    admin_server = {{admin_server_host|default(kdc_host_list[0]|trim(), True)}}
+{%- if kdc_host_list -%}
+{% for kdc_host in kdc_host_list %}
+    kdc = {{kdc_host|trim()}}
+{%- endfor -%}
+{% endif %}
+{%- endif %}
+{%- endif %}
+  }
+
+{# Append additional realm declarations below #}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/alerts.json
new file mode 100755
index 0000000..4986e04
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/alerts.json
@@ -0,0 +1,32 @@
+{
+  "KNOX": {
+    "service": [],
+    "KNOX_GATEWAY": [
+      {
+        "name": "knox_gateway_process",
+        "label": "Knox Gateway Process",
+        "description": "This host-level alert is triggered if the Knox Gateway cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{gateway-site/gateway.port}}",
+          "default_port": 8443,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
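
For orientation only (this note and sketch are not part of the commit): the alert above is a PORT-type check with a 1.5 s warning and 5.0 s critical threshold. A rough standalone Python approximation of that check; the host and port below are placeholders.

# Illustrative sketch, not part of this commit; host and port are placeholders.
import socket
import time

def check_port(host, port, warn=1.5, crit=5.0):
    start = time.time()
    try:
        # Treat a connect slower than the critical threshold as a failure.
        with socket.create_connection((host, port), timeout=crit):
            elapsed = time.time() - start
    except OSError as err:
        return "CRITICAL", "Connection failed: {0} to {1}:{2}".format(err, host, port)
    state = "OK" if elapsed < warn else "WARNING"
    return state, "TCP OK - {0:.3f}s response on port {1}".format(elapsed, port)

print(check_port("localhost", 8443))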

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/gateway-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/gateway-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/gateway-log4j.xml
new file mode 100755
index 0000000..370f786
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/gateway-log4j.xml
@@ -0,0 +1,83 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements. See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership. The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License. You may obtain a copy of the License at
+      #
+      # http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing, software
+      # distributed under the License is distributed on an "AS IS" BASIS,
+      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      # See the License for the specific language governing permissions and
+      # limitations under the License.
+
+      app.log.dir=${launcher.dir}/../logs
+      app.log.file=${launcher.name}.log
+      app.audit.file=${launcher.name}-audit.log
+
+      log4j.rootLogger=ERROR, drfa
+
+      log4j.logger.org.apache.hadoop.gateway=INFO
+      #log4j.logger.org.apache.hadoop.gateway=DEBUG
+
+      #log4j.logger.org.eclipse.jetty=DEBUG
+      #log4j.logger.org.apache.shiro=DEBUG
+      #log4j.logger.org.apache.http=DEBUG
+      #log4j.logger.org.apache.http.client=DEBUG
+      #log4j.logger.org.apache.http.headers=DEBUG
+      #log4j.logger.org.apache.http.wire=DEBUG
+
+      log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+      log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+      log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+      log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.drfa.File=${app.log.dir}/${app.log.file}
+      log4j.appender.drfa.DatePattern=.yyyy-MM-dd
+      log4j.appender.drfa.layout=org.apache.log4j.PatternLayout
+      log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+      log4j.logger.audit=INFO, auditfile
+      log4j.appender.auditfile=org.apache.log4j.DailyRollingFileAppender
+      log4j.appender.auditfile.File=${app.log.dir}/${app.audit.file}
+      log4j.appender.auditfile.Append = true
+      log4j.appender.auditfile.DatePattern = '.'yyyy-MM-dd
+      log4j.appender.auditfile.layout = org.apache.hadoop.gateway.audit.log4j.layout.AuditLayout
+
+    </value>
+    <description>
+      content for log4j.properties file for Knox.
+    </description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/gateway-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/gateway-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/gateway-site.xml
new file mode 100755
index 0000000..0e81d0a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/gateway-site.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!-- The default settings for Knox. -->
+<!-- Edit gateway-site.xml to change settings for your local -->
+<!-- install. -->
+
+<configuration supports_final="false">
+
+    <property>
+        <name>gateway.port</name>
+        <value>8443</value>
+        <description>The HTTP port for the Gateway.</description>
+    </property>
+
+    <property>
+        <name>gateway.path</name>
+        <value>gateway</value>
+        <description>The default context path for the gateway.</description>
+    </property>
+
+    <property>
+        <name>gateway.gateway.conf.dir</name>
+        <value>deployments</value>
+        <description>The directory within GATEWAY_HOME that contains gateway topology files and deployments.</description>
+    </property>
+
+    <property>
+        <name>gateway.hadoop.kerberos.secured</name>
+        <value>false</value>
+        <description>Boolean flag indicating whether the Hadoop cluster protected by Gateway is secured with Kerberos</description>
+    </property>
+
+    <property>
+        <name>java.security.krb5.conf</name>
+        <value>/etc/knox/conf/krb5.conf</value>
+        <description>Absolute path to krb5.conf file</description>
+    </property>
+
+    <property>
+        <name>java.security.auth.login.config</name>
+        <value>/etc/knox/conf/krb5JAASLogin.conf</value>
+        <description>Absolute path to JAAS login config file</description>
+    </property>
+
+    <property>
+        <name>sun.security.krb5.debug</name>
+        <value>true</value>
+        <description>Boolean flag indicating whether to enable debug messages for krb5 authentication</description>
+    </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/knox-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/knox-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/knox-env.xml
new file mode 100755
index 0000000..40504f6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/knox-env.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+    <!-- knox-env.sh -->
+
+    <property require-input="true">
+      <name>knox_master_secret</name>
+      <display-name>Knox Master Secret</display-name>
+      <value></value>
+      <property-type>PASSWORD</property-type>
+      <description>password to use as the master secret</description>
+      <value-attributes>
+        <editable-only-at-install>true</editable-only-at-install>
+        <overridable>false</overridable>
+      </value-attributes>
+    </property>
+
+    <property>
+        <name>knox_user</name>
+        <value>knox</value>
+        <property-type>USER</property-type>
+        <description>Knox Username.</description>
+    </property>
+
+    <property>
+        <name>knox_group</name>
+        <value>knox</value>
+        <property-type>GROUP</property-type>
+        <description>Knox Group.</description>
+    </property>
+
+    <property>
+      <name>knox_pid_dir</name>
+      <display-name>Knox PID dir</display-name>
+      <value>/var/run/knox</value>
+      <description>Knox PID dir.</description>
+      <value-attributes>
+        <type>directory</type>
+        <editable-only-at-install>true</editable-only-at-install>
+        <overridable>false</overridable>
+      </value-attributes>
+    </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/ldap-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/ldap-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/ldap-log4j.xml
new file mode 100755
index 0000000..a0cf658
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/ldap-log4j.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+        # Licensed to the Apache Software Foundation (ASF) under one
+        # or more contributor license agreements.  See the NOTICE file
+        # distributed with this work for additional information
+        # regarding copyright ownership.  The ASF licenses this file
+        # to you under the Apache License, Version 2.0 (the
+        # "License"); you may not use this file except in compliance
+        # with the License.  You may obtain a copy of the License at
+        #
+        #     http://www.apache.org/licenses/LICENSE-2.0
+        #
+        # Unless required by applicable law or agreed to in writing, software
+        # distributed under the License is distributed on an "AS IS" BASIS,
+        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+        # See the License for the specific language governing permissions and
+        # limitations under the License.
+
+        app.log.dir=${launcher.dir}/../logs
+        app.log.file=${launcher.name}.log
+
+        log4j.rootLogger=ERROR, drfa
+        log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO
+        log4j.logger.org.apache.directory=WARN
+
+        log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+        log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+        log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+        log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender
+        log4j.appender.drfa.File=${app.log.dir}/${app.log.file}
+        log4j.appender.drfa.DatePattern=.yyyy-MM-dd
+        log4j.appender.drfa.layout=org.apache.log4j.PatternLayout
+        log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+    </value>
+    <description>
+      content for log4j.properties file for the demo LDAP that comes with Knox.
+    </description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/topology.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/topology.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/topology.xml
new file mode 100755
index 0000000..162fffc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/topology.xml
@@ -0,0 +1,157 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_adding_forbidden="true">
+    <!-- topology file -->
+
+    <property>
+    <name>content</name>
+    <value>
+        &lt;topology&gt;
+
+            &lt;gateway&gt;
+
+                &lt;provider&gt;
+                    &lt;role&gt;authentication&lt;/role&gt;
+                    &lt;name&gt;ShiroProvider&lt;/name&gt;
+                    &lt;enabled&gt;true&lt;/enabled&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;sessionTimeout&lt;/name&gt;
+                        &lt;value&gt;30&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;main.ldapRealm&lt;/name&gt;
+                        &lt;value&gt;org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;main.ldapRealm.userDnTemplate&lt;/name&gt;
+                        &lt;value&gt;uid={0},ou=people,dc=hadoop,dc=apache,dc=org&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;main.ldapRealm.contextFactory.url&lt;/name&gt;
+                        &lt;value&gt;ldap://{{knox_host_name}}:33389&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;main.ldapRealm.contextFactory.authenticationMechanism&lt;/name&gt;
+                        &lt;value&gt;simple&lt;/value&gt;
+                    &lt;/param&gt;
+                    &lt;param&gt;
+                        &lt;name&gt;urls./**&lt;/name&gt;
+                        &lt;value&gt;authcBasic&lt;/value&gt;
+                    &lt;/param&gt;
+                &lt;/provider&gt;
+
+                &lt;provider&gt;
+                    &lt;role&gt;identity-assertion&lt;/role&gt;
+                    &lt;name&gt;Default&lt;/name&gt;
+                    &lt;enabled&gt;true&lt;/enabled&gt;
+                &lt;/provider&gt;
+
+            &lt;/gateway&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;NAMENODE&lt;/role&gt;
+                &lt;url&gt;hdfs://{{namenode_host}}:{{namenode_rpc_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;JOBTRACKER&lt;/role&gt;
+                &lt;url&gt;rpc://{{rm_host}}:{{jt_rpc_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;WEBHDFS&lt;/role&gt;
+                &lt;url&gt;http://{{namenode_host}}:{{namenode_http_port}}/webhdfs&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;WEBHCAT&lt;/role&gt;
+                &lt;url&gt;http://{{webhcat_server_host}}:{{templeton_port}}/templeton&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;OOZIE&lt;/role&gt;
+                &lt;url&gt;http://{{oozie_server_host}}:{{oozie_server_port}}/oozie&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;WEBHBASE&lt;/role&gt;
+                &lt;url&gt;http://{{hbase_master_host}}:{{hbase_master_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;HIVE&lt;/role&gt;
+                &lt;url&gt;http://{{hive_server_host}}:{{hive_http_port}}/{{hive_http_path}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;RESOURCEMANAGER&lt;/role&gt;
+                &lt;url&gt;http://{{rm_host}}:{{rm_port}}/ws&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;HDFSUI&lt;/role&gt;
+                &lt;url&gt;http://{{namenode_host}}:{{namenode_http_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;HBASEUI&lt;/role&gt;
+                &lt;url&gt;http://{{hbase_master_host}}:{{hbase_master_ui_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;SOLR&lt;/role&gt;
+                &lt;url&gt;http://{{solr_host}}:{{solr_port}}/solr&lt;/url&gt;
+            &lt;/service&gt;
+
+
+            &lt;service&gt;
+                &lt;role&gt;SPARKUI&lt;/role&gt;
+                &lt;url&gt;http://{{spark_historyserver_host}}:{{spark_historyserver_ui_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;OOZIEUI&lt;/role&gt;
+                &lt;url&gt;http://{{oozie_server_host}}:{{oozie_server_port}}/oozie&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;YARNUI&lt;/role&gt;
+                &lt;url&gt;http://{{rm_host}}:{{rm_port}}&lt;/url&gt;
+            &lt;/service&gt;
+
+            &lt;service&gt;
+                &lt;role&gt;JOBSTORYUI&lt;/role&gt;
+                &lt;url&gt;http://{{mr_historyserver_address}}&lt;/url&gt;
+            &lt;/service&gt;
+
+        &lt;/topology&gt;
+    </value>
+    <description>
+        The configuration specifies the Hadoop cluster services Knox will provide access to.
+    </description>
+    <value-attributes>
+       <empty-value-valid>true</empty-value-valid>
+       <show-property-name>false</show-property-name>
+    </value-attributes>
+    </property>
+</configuration>
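
For orientation only (this note and sketch are not part of the commit): the property value above stores an XML-escaped &lt;topology&gt; document, so the entities decode back to plain XML once the configuration file is parsed. A minimal Python sketch, assuming the file is saved locally as topology.xml (a placeholder path):

# Illustrative sketch, not part of this commit; "topology.xml" is a placeholder path.
import xml.etree.ElementTree as ET

root = ET.parse("topology.xml").getroot()
for prop in root.findall("property"):
    if prop.findtext("name") == "content":
        content = prop.findtext("value")          # entities are decoded by the parser
        print(content.strip().splitlines()[0])    # -> <topology>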

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/users-ldif.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/users-ldif.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/users-ldif.xml
new file mode 100755
index 0000000..6f6fa54
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/configuration/users-ldif.xml
@@ -0,0 +1,138 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_adding_forbidden="true">
+
+    <property>
+        <name>content</name>
+        <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: 1
+
+# Please replace with site specific values
+dn: dc=hadoop,dc=apache,dc=org
+objectclass: organization
+objectclass: dcObject
+o: Hadoop
+dc: hadoop
+
+# Entry for a sample people container
+# Please replace with site specific values
+dn: ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:organizationalUnit
+ou: people
+
+# Entry for a sample end user
+# Please replace with site specific values
+dn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: Guest
+sn: User
+uid: guest
+userPassword:guest-password
+
+# entry for sample user admin
+dn: uid=admin,ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: Admin
+sn: Admin
+uid: admin
+userPassword:admin-password
+
+# entry for sample user sam
+dn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: sam
+sn: sam
+uid: sam
+userPassword:sam-password
+
+# entry for sample user tom
+dn: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:person
+objectclass:organizationalPerson
+objectclass:inetOrgPerson
+cn: tom
+sn: tom
+uid: tom
+userPassword:tom-password
+
+# create FIRST Level groups branch
+dn: ou=groups,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass:organizationalUnit
+ou: groups
+description: generic groups branch
+
+# create the analyst group under groups
+dn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass: groupofnames
+cn: analyst
+description:analyst  group
+member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org
+member: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org
+
+
+# create the scientist group under groups
+dn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org
+objectclass:top
+objectclass: groupofnames
+cn: scientist
+description: scientist group
+member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org
+
+        </value>
+        <description>
+            Content of the users.ldif file for the demo LDAP server that ships with Knox.
+        </description>
+        <value-attributes>
+          <show-property-name>false</show-property-name>
+        </value-attributes>
+    </property>
+</configuration>
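
A quick way to sanity-check the sample entries above once the demo LDAP is running is a simple bind and search as one of the sample users. The sketch below assumes the third-party python-ldap package and the commonly used Knox demo LDAP port 33389; host, port and credentials are illustrative and should be adjusted to the actual deployment.

import ldap

# Illustrative host/port; the Knox demo LDAP commonly listens on 33389.
conn = ldap.initialize("ldap://localhost:33389")
conn.simple_bind_s("uid=guest,ou=people,dc=hadoop,dc=apache,dc=org", "guest-password")
for dn, attrs in conn.search_s("ou=groups,dc=hadoop,dc=apache,dc=org",
                               ldap.SCOPE_SUBTREE, "(objectclass=groupofnames)"):
    print(dn, attrs.get("member"))
conn.unbind_s()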

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/kerberos.json
new file mode 100755
index 0000000..5efd581
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/kerberos.json
@@ -0,0 +1,62 @@
+{
+  "services": [
+    {
+      "name": "KNOX",
+      "components": [
+        {
+          "name": "KNOX_GATEWAY",
+          "identities": [
+            {
+              "name": "knox_principal",
+              "principal": {
+                "value": "${knox-env/knox_user}/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "knox-env/knox_principal_name",
+                "local_username": "${knox-env/knox_user}"
+
+              },
+              "keytab": {
+                "file": "${keytab_dir}/knox.service.keytab",
+                "owner": {
+                  "name": "${knox-env/knox_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "knox-env/knox_keytab_path"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "gateway-site": {
+                "gateway.hadoop.kerberos.secured": "true",
+                "java.security.krb5.conf": "/etc/krb5.conf"
+              }
+            },
+            {
+              "core-site": {
+                "hadoop.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "hadoop.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "webhcat-site": {
+                "webhcat.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "webhcat.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            },
+            {
+              "oozie-site": {
+                "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.groups": "${hadoop-env/proxyuser_group}",
+                "oozie.service.ProxyUserService.proxyuser.${knox-env/knox_user}.hosts": "${clusterHostInfo/knox_gateway_hosts}"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
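
The ${...} tokens in the descriptor above follow Ambari's config-type/property convention, e.g. ${knox-env/knox_user} reads knox_user from knox-env and ${clusterHostInfo/knox_gateway_hosts} expands to the gateway host list. Purely as an illustration of how such placeholders read (not Ambari's actual implementation), a minimal resolver could look like this:

import re

def resolve(value, configs):
    # Replace ${config-type/property} tokens from a flat lookup table; leave unknown tokens as-is.
    return re.sub(r"\$\{([^}]+)\}", lambda m: configs.get(m.group(1), m.group(0)), value)

configs = {"knox-env/knox_user": "knox", "realm": "EXAMPLE.COM"}
print(resolve("${knox-env/knox_user}/_HOST@${realm}", configs))  # knox/_HOST@EXAMPLE.COM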

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/metainfo.xml
new file mode 100755
index 0000000..6093bfe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/metainfo.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KNOX</name>
+      <displayName>Knox</displayName>
+      <comment>Provides a single point of authentication and access for Apache Hadoop services in a cluster</comment>
+      <version>0.6.0</version>
+      <components>
+        <component>
+          <name>KNOX_GATEWAY</name>
+          <displayName>Knox Gateway</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/knox_gateway.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+            <customCommands>
+                <customCommand>
+                    <name>STARTDEMOLDAP</name>
+                    <commandScript>
+                        <script>scripts/demo_ldap.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>
+                </customCommand>
+                <customCommand>
+                    <name>STOPDEMOLDAP</name>
+                    <commandScript>
+                        <script>scripts/demo_ldap.py</script>
+                        <scriptType>PYTHON</scriptType>
+                        <timeout>600</timeout>
+                    </commandScript>
+                </customCommand>
+            </customCommands>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>knox-0.5.0*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>knox-0.5.0*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      <configuration-dependencies>
+        <config-type>gateway-site</config-type>
+        <config-type>gateway-log4j</config-type>
+        <config-type>topology</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
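
The STARTDEMOLDAP and STOPDEMOLDAP custom commands declared above are handled by the component script; knox_gateway.py further down in this commit defines matching startdemoldap() and stopdemoldap() methods. As a rough sketch of that name-based dispatch (illustrative only, not Ambari's actual Script class):

class ComponentScript(object):
    # Hypothetical dispatcher: a custom command maps to a lower-cased method of the same name.
    def run_custom_command(self, name, env):
        handler = getattr(self, name.lower(), None)
        if handler is None:
            raise NotImplementedError("no handler for custom command %s" % name)
        handler(env)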

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/files/validateKnoxStatus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/files/validateKnoxStatus.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/files/validateKnoxStatus.py
new file mode 100755
index 0000000..0d3b14d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/files/validateKnoxStatus.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import optparse
+import socket
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options]")
+  parser.add_option("-p", "--port", dest="port", help="Port for Knox process")
+  parser.add_option("-n", "--hostname", dest="hostname", help="Hostname of Knox Gateway component")
+
+  (options, args) = parser.parse_args()
+  timeout_seconds = 5
+  try:
+    s = socket.create_connection((options.hostname, int(options.port)),timeout=timeout_seconds)
+    print "Successfully connected to %s on port %s" % (options.hostname, options.port)
+    s.close()
+  except socket.error, e:
+    print "Connection to %s on port %s failed: %s" % (options.hostname, options.port, e)
+    exit(1)
+
+if __name__ == "__main__":
+  main()
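
For reference, a hypothetical invocation of the script above, where 8443 is the common gateway.port default and the hostname is a placeholder:

  python validateKnoxStatus.py -p 8443 -n gateway-host.example.com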

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/knox.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/knox.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/knox.py
new file mode 100755
index 0000000..a98af31
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/knox.py
@@ -0,0 +1,134 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.resources.service import ServiceConfig
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.resources.template_config import TemplateConfig
+from resource_management.core.resources.system import File, Execute, Directory
+from resource_management.core.shell import as_user
+from resource_management.core.source import InlineTemplate
+
+
+from resource_management.core.logger import Logger
+
+#@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+#def knox():
+#  import params
+#
+#  XmlConfig("gateway-site.xml",
+#            conf_dir=params.knox_conf_dir,
+#            configurations=params.config['configurations']['gateway-site'],
+#            configuration_attributes=params.config['configuration_attributes']['gateway-site'],
+#            owner=params.knox_user
+#  )
+#
+#  # Manually overriding service logon user & password set by the installation package
+#  ServiceConfig(params.knox_gateway_win_service_name,
+#                action="change_user",
+#                username = params.knox_user,
+#                password = Script.get_password(params.knox_user))
+#
+#  File(os.path.join(params.knox_conf_dir, "gateway-log4j.properties"),
+#       owner=params.knox_user,
+#       content=params.gateway_log4j
+#  )
+#
+#  File(os.path.join(params.knox_conf_dir, "topologies", "default.xml"),
+#       group=params.knox_group,
+#       owner=params.knox_user,
+#       content=InlineTemplate(params.topology_template)
+#  )
+#
+#  if params.security_enabled:
+#    TemplateConfig( os.path.join(params.knox_conf_dir, "krb5JAASLogin.conf"),
+#        owner = params.knox_user,
+#        template_tag = None
+#    )
+#
+#  if not os.path.isfile(params.knox_master_secret_path):
+#    cmd = format('cmd /C {knox_client_bin} create-master --master {knox_master_secret!p}')
+#    Execute(cmd)
+#    cmd = format('cmd /C {knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
+#    Execute(cmd)
+#
+#@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def knox():
+    import params
+
+    Directory([params.knox_data_dir, params.knox_logs_dir, params.knox_pid_dir, params.knox_conf_dir, os.path.join(params.knox_conf_dir, "topologies")],
+              owner = params.knox_user,
+              group = params.knox_group,
+              create_parents = True,
+              cd_access = "a",
+              mode = 0755,
+              recursive_ownership = True,
+              recursion_follow_links = True,
+    )
+
+
+    XmlConfig("gateway-site.xml",
+              conf_dir=params.knox_conf_dir,
+              configurations=params.config['configurations']['gateway-site'],
+              configuration_attributes=params.config['configuration_attributes']['gateway-site'],
+              owner=params.knox_user,
+              group=params.knox_group,
+    )
+
+    File(format("{params.knox_conf_dir}/gateway-log4j.properties"),
+         mode=0644,
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=params.gateway_log4j
+    )
+
+    File(format("{params.knox_conf_dir}/topologies/default.xml"),
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=InlineTemplate(params.topology_template)
+    )
+    if params.security_enabled:
+      TemplateConfig( format("{knox_conf_dir}/krb5JAASLogin.conf"),
+                      owner = params.knox_user,
+                      template_tag = None
+      )
+
+
+    cmd = format('{knox_client_bin} create-master --master {knox_master_secret!p}')
+    master_secret_exist = as_user(format('test -f {knox_master_secret_path}'), params.knox_user)
+
+    Execute(cmd,
+            user=params.knox_user,
+            environment={'JAVA_HOME': params.java_home},
+            not_if=master_secret_exist,
+    )
+
+    cmd = format('{knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
+    cert_store_exist = as_user(format('test -f {knox_cert_store_path}'), params.knox_user)
+
+    Execute(cmd,
+            user=params.knox_user,
+            environment={'JAVA_HOME': params.java_home},
+            not_if=cert_store_exist,
+    )
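
The create-master and create-cert calls above rely on Execute's not_if guard for idempotence: the knoxcli command is skipped whenever the guard test already succeeds. A plain-Python sketch of that semantics (illustrative only; the real behavior is provided by resource_management):

import subprocess

def execute_unless(cmd, guard):
    # Mirrors Execute(cmd, not_if=guard): if the guard exits 0 (e.g. the master
    # secret file already exists), skip cmd; otherwise run it and fail on error.
    if subprocess.call(guard, shell=True) == 0:
        return
    subprocess.check_call(cmd, shell=True)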

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/knox_gateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/knox_gateway.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/knox_gateway.py
new file mode 100755
index 0000000..6cc97ac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/knox_gateway.py
@@ -0,0 +1,290 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+import os
+import tarfile
+# used by pre_upgrade_restart below
+from resource_management.libraries.functions import tar_archive
+from resource_management.libraries.functions.constants import Direction
+from resource_management.libraries.functions import stack_select
+
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, validate_security_config_properties, get_params_from_filesystem, \
+  FILE_TYPE_XML
+import sys
+
+
+#if OSCheck.is_windows_family():
+#  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+
+import upgrade
+from knox import knox
+from knox_ldap import ldap
+
+#from setup_ranger_knox import setup_ranger_knox
+
+class KnoxGateway(Script):
+
+
+  def get_component_name(self):
+    return "knox-server"
+
+  def install(self, env):
+    self.install_packages(env)
+    import params
+    env.set_params(params)
+
+    File(format('{knox_conf_dir}/topologies/sandbox.xml'),
+         action = "delete",
+    )
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    knox()
+    ldap()
+
+
+
+#@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+#class KnoxGatewayWindows(KnoxGateway):
+#  def start(self, env):
+#    import params
+#    env.set_params(params)
+#    self.configure(env)
+#    # setup_ranger_knox(env)
+#    Service(params.knox_gateway_win_service_name, action="start")
+#
+#  def stop(self, env):
+#    import params
+#    env.set_params(params)
+#    Service(params.knox_gateway_win_service_name, action="stop")
+#
+#  def status(self, env):
+#    import status_params
+#    env.set_params(status_params)
+#    check_windows_service_status(status_params.knox_gateway_win_service_name)
+#
+#  def startdemoldap(self, env):
+#    import params
+#    env.set_params(params)
+#    self.configureldap(env)
+#    Service(params.knox_ldap_win_service_name, action="start")
+#
+#  def stopdemoldap(self, env):
+#    import params
+#    env.set_params(params)
+#    Service(params.knox_ldap_win_service_name, action="stop")
+
+
+
+#@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+#class KnoxGatewayDefault(KnoxGateway):
+#  def get_component_name(self):
+#    return {"HDP": "knox-server"}
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+
+      absolute_backup_dir = None
+      if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
+        Logger.info("Backing up directories. Initial conf folder: %s" % os.path.realpath(params.knox_conf_dir))
+
+        # This will backup the contents of the conf directory into /tmp/knox-upgrade-backup/knox-conf-backup.tar
+        absolute_backup_dir = upgrade.backup_data()
+
+      # conf-select will change the symlink to the conf folder.
+      conf_select.select(params.stack_name, "knox", params.version)
+      stack_select.select("knox-server", params.version)
+
+      # Extract the tar of the old conf folder into the new conf directory
+      if absolute_backup_dir is not None and params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
+        conf_tar_source_path = os.path.join(absolute_backup_dir, upgrade.BACKUP_CONF_ARCHIVE)
+        data_tar_source_path = os.path.join(absolute_backup_dir, upgrade.BACKUP_DATA_ARCHIVE)
+        if os.path.exists(conf_tar_source_path):
+          extract_dir = os.path.realpath(params.knox_conf_dir)
+          conf_tar_dest_path = os.path.join(extract_dir, upgrade.BACKUP_CONF_ARCHIVE)
+          Logger.info("Copying %s into %s file." % (upgrade.BACKUP_CONF_ARCHIVE, conf_tar_dest_path))
+          Execute(('cp', conf_tar_source_path, conf_tar_dest_path),
+                  sudo = True,
+          )
+
+          tar_archive.untar_archive(conf_tar_source_path, extract_dir)
+
+          File(conf_tar_dest_path,
+               action = "delete",
+          )
+          extract_dir = os.path.realpath(params.knox_data_dir+"-"+params.version+"/security")
+          if not os.path.exists(extract_dir):
+             Directory(extract_dir,
+                       owner = params.knox_user,
+                       group = params.knox_group,
+                       create_parents = True
+             )
+          data_tar_dest_path = os.path.join(extract_dir, upgrade.BACKUP_DATA_ARCHIVE)
+          Logger.info("Copying %s to %s." % (upgrade.BACKUP_DATA_ARCHIVE, data_tar_dest_path))
+          Execute(('cp', data_tar_source_path, data_tar_dest_path ),
+                  sudo = True,
+          )
+
+          tar_archive.untar_archive(data_tar_source_path, extract_dir)
+
+          File(data_tar_dest_path,
+               action = "delete",
+          )
+
+          populate_topology_template = format('cp /usr/iop/{version}/etc/knox/conf.dist/topologies/* {knox_conf_dir}/topologies')
+          Logger.info("Prepare to populate topologies template via command: {0}".format(populate_topology_template))
+          Execute(populate_topology_template)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    daemon_cmd = format('{knox_bin} start')
+    populate_topology = format('cd {knox_conf_dir}/topologies/; {sudo} ambari-python-wrap ./generate_template.py; {sudo} chmod 777 *.xml')
+    no_op_test = format('ls {knox_pid_file} >/dev/null 2>&1 && ps -p `cat {knox_pid_file}` >/dev/null 2>&1')
+    #setup_ranger_knox(upgrade_type=upgrade_type)
+    # Update the Knox-managed PID symlink so it points at the configured PID directory (needed when a custom location is used)
+    if os.path.islink(params.knox_managed_pid_symlink) and os.path.realpath(params.knox_managed_pid_symlink) != params.knox_pid_dir:
+      os.unlink(params.knox_managed_pid_symlink)
+      os.symlink(params.knox_pid_dir, params.knox_managed_pid_symlink)
+
+    Execute(populate_topology)
+    Execute(daemon_cmd,
+            user=params.knox_user,
+            environment={'JAVA_HOME': params.java_home},
+            not_if=no_op_test
+    )
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    daemon_cmd = format('{knox_bin} stop')
+    Execute(daemon_cmd,
+            environment={'JAVA_HOME': params.java_home},
+            user=params.knox_user,
+    )
+    File(params.knox_pid_file,
+         action="delete",
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.knox_pid_file)
+
+
+  def configureldap(self, env):
+    import params
+    env.set_params(params)
+    ldap()
+
+  def startdemoldap(self, env):
+    import params
+    env.set_params(params)
+    self.configureldap(env)
+    daemon_cmd = format('{ldap_bin} start')
+    no_op_test = format('ls {ldap_pid_file} >/dev/null 2>&1 && ps -p `cat {ldap_pid_file}` >/dev/null 2>&1')
+    Execute(daemon_cmd,
+            user=params.knox_user,
+            environment={'JAVA_HOME': params.java_home},
+            not_if=no_op_test
+    )
+
+  def stopdemoldap(self, env):
+    import params
+    env.set_params(params)
+    self.configureldap(env)
+    daemon_cmd = format('{ldap_bin} stop')
+    Execute(daemon_cmd,
+            environment={'JAVA_HOME': params.java_home},
+            user=params.knox_user,
+            )
+    Execute (format("rm -f {ldap_pid_file}"))
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    if status_params.security_enabled:
+      expectations = {}
+      expectations.update(build_expectations(
+        'krb5JAASLogin',
+        None,
+        ['keytab', 'principal'],
+        None
+      ))
+      expectations.update(build_expectations(
+        'gateway-site',
+        {
+          "gateway.hadoop.kerberos.secured" : "true"
+        },
+        None,
+        None
+      ))
+
+      security_params = {
+        "krb5JAASLogin":
+          {
+            'keytab': status_params.knox_keytab_path,
+            'principal': status_params.knox_principal_name
+          }
+      }
+      security_params.update(get_params_from_filesystem(status_params.knox_conf_dir,
+        {"gateway-site.xml" : FILE_TYPE_XML}))
+
+      result_issues = validate_security_config_properties(security_params, expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'krb5JAASLogin' not in security_params
+               or 'keytab' not in security_params['krb5JAASLogin']
+               or 'principal' not in security_params['krb5JAASLogin']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file and principal are not set."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.knox_user,
+                                security_params['krb5JAASLogin']['keytab'],
+                                security_params['krb5JAASLogin']['principal'],
+                                status_params.hostname,
+                                status_params.temp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+if __name__ == "__main__":
+  KnoxGateway().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/knox_ldap.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/knox_ldap.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/knox_ldap.py
new file mode 100755
index 0000000..7712396
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/knox_ldap.py
@@ -0,0 +1,54 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+
+def _ldap_common():
+    import params
+
+    File(os.path.join(params.knox_conf_dir, 'ldap-log4j.properties'),
+         mode=params.mode,
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=params.ldap_log4j
+    )
+
+    File(os.path.join(params.knox_conf_dir, 'users.ldif'),
+         mode=params.mode,
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=params.users_ldif
+    )
+
+#@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+#def ldap():
+#  import params
+#
+#  # Manually overriding service logon user & password set by the installation package
+#  ServiceConfig(params.knox_ldap_win_service_name,
+#                action="change_user",
+#                username = params.knox_user,
+#                password = Script.get_password(params.knox_user))
+#
+#  _ldap_common()
+
+#@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def ldap():
+  _ldap_common()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/ldap.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/ldap.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/ldap.py
new file mode 100755
index 0000000..d0bab65
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/ldap.py
@@ -0,0 +1,55 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+
+
+def _ldap_common():
+    import params
+
+    File(os.path.join(params.knox_conf_dir, 'ldap-log4j.properties'),
+         mode=params.mode,
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=params.ldap_log4j
+    )
+
+    File(os.path.join(params.knox_conf_dir, 'users.ldif'),
+         mode=params.mode,
+         group=params.knox_group,
+         owner=params.knox_user,
+         content=params.users_ldif
+    )
+
+#@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+#def ldap():
+#  import params
+#
+#  # Manually overriding service logon user & password set by the installation package
+#  ServiceConfig(params.knox_ldap_win_service_name,
+#                action="change_user",
+#                username = params.knox_user,
+#                password = Script.get_password(params.knox_user))
+#
+#  _ldap_common()
+
+#@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def ldap():
+  _ldap_common()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/params.py
new file mode 100755
index 0000000..9fec3a3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/params.py
@@ -0,0 +1,172 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.libraries.functions.default import default
+from resource_management import *
+import status_params
+
+ibm_distribution_knox_dir = '/usr/iop/current/knox-server'
+ibm_distribution_knox_var = '/var'
+
+
+# server configurations
+config = Script.get_config()
+
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+version = default("/commandParams/version", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+knox_bin = ibm_distribution_knox_dir + '/bin/gateway.sh'
+ldap_bin = ibm_distribution_knox_dir + '/bin/ldap.sh'
+knox_client_bin = ibm_distribution_knox_dir + '/bin/knoxcli.sh'
+
+namenode_hosts = default("/clusterHostInfo/namenode_host", None)
+if type(namenode_hosts) is list:
+  namenode_host = namenode_hosts[0]
+else:
+  namenode_host = namenode_hosts
+
+has_namenode = not namenode_host == None
+namenode_http_port = "50070"
+namenode_rpc_port = "8020"
+
+if has_namenode:
+  if 'dfs.namenode.http-address' in config['configurations']['hdfs-site']:
+    namenode_http_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
+
+  if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
+    namenode_rpc_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.rpc-address'])
+
+rm_hosts = default("/clusterHostInfo/rm_host", None)
+if type(rm_hosts) is list:
+  rm_host = rm_hosts[0]
+else:
+  rm_host = rm_hosts
+has_rm = not rm_host == None
+
+jt_rpc_port = "8050"
+rm_port = "8080"
+
+if has_rm:
+  if 'yarn.resourcemanager.address' in config['configurations']['yarn-site']:
+    jt_rpc_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.address'])
+
+  if 'yarn.resourcemanager.webapp.address' in config['configurations']['yarn-site']:
+    rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
+
+hive_http_port = default('/configurations/hive-site/hive.server2.thrift.http.port', "10001")
+hive_http_path = default('/configurations/hive-site/hive.server2.thrift.http.path', "cliservice")
+hive_server_hosts = default("/clusterHostInfo/hive_server_host", None)
+if type(hive_server_hosts) is list:
+  hive_server_host = hive_server_hosts[0]
+else:
+  hive_server_host = hive_server_hosts
+
+templeton_port = default('/configurations/webhcat-site/templeton.port', "50111")
+webhcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", None)
+if type(webhcat_server_hosts) is list:
+  webhcat_server_host = webhcat_server_hosts[0]
+else:
+  webhcat_server_host = webhcat_server_hosts
+
+hbase_master_port = default('/configurations/hbase-site/hbase.rest.port', "8080")
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", None)
+if type(hbase_master_hosts) is list:
+  hbase_master_host = hbase_master_hosts[0]
+else:
+  hbase_master_host = hbase_master_hosts
+
+oozie_server_hosts = default("/clusterHostInfo/oozie_server", None)
+if type(oozie_server_hosts) is list:
+  oozie_server_host = oozie_server_hosts[0]
+else:
+  oozie_server_host = oozie_server_hosts
+
+has_oozie = not oozie_server_host == None
+oozie_server_port = "11000"
+
+if has_oozie:
+    if 'oozie.base.url' in config['configurations']['oozie-site']:
+        oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
+
+# Knox managed properties
+knox_managed_pid_symlink= "/usr/iop/current/knox-server/pids"
+
+#
+#Hbase master port
+#
+hbase_master_ui_port = default('/configurations/hbase-site/hbase.master.info.port', "60010")
+
+#Spark
+spark_historyserver_hosts = default("/clusterHostInfo/spark_jobhistoryserver_hosts", None)
+if type(spark_historyserver_hosts) is list:
+  spark_historyserver_host = spark_historyserver_hosts[0]
+else:
+  spark_historyserver_host = spark_historyserver_hosts
+
+spark_historyserver_ui_port = default("/configurations/spark-defaults/spark.history.ui.port", "18080")
+# Solr
+solr_host=default("/configurations/solr/hostname", None)
+solr_port=default("/configurations/solr/solr-env/solr_port","8983")
+
+# JobHistory mapreduce
+mr_historyserver_address = default("/configurations/mapred-site/mapreduce.jobhistory.webapp.address", None)
+
+
+# server configurations
+knox_conf_dir = ibm_distribution_knox_dir + '/conf'
+knox_data_dir = ibm_distribution_knox_dir +  '/data'
+knox_logs_dir = ibm_distribution_knox_var + '/log/knox'
+knox_pid_dir = status_params.knox_pid_dir
+knox_user = default("/configurations/knox-env/knox_user", "knox")
+knox_group = default("/configurations/knox-env/knox_group", "knox")
+mode = 0644
+knox_pid_file = status_params.knox_pid_file
+ldap_pid_file = status_params.ldap_pid_file
+knox_master_secret = config['configurations']['knox-env']['knox_master_secret']
+knox_master_secret_path = ibm_distribution_knox_dir + '/data/security/master'
+knox_cert_store_path = ibm_distribution_knox_dir + '/data/security/keystores/gateway.jks'
+knox_host_name = config['clusterHostInfo']['knox_gateway_hosts'][0]
+knox_host_name_in_cluster = config['hostname']
+knox_host_port = config['configurations']['gateway-site']['gateway.port']
+topology_template = config['configurations']['topology']['content']
+gateway_log4j = config['configurations']['gateway-log4j']['content']
+ldap_log4j = config['configurations']['ldap-log4j']['content']
+users_ldif = config['configurations']['users-ldif']['content']
+java_home = config['hostLevelParams']['java_home']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+if security_enabled:
+  knox_keytab_path = config['configurations']['knox-env']['knox_keytab_path']
+  _hostname_lowercase = config['hostname'].lower()
+  knox_principal_name = config['configurations']['knox-env']['knox_principal_name'].replace('_HOST',_hostname_lowercase)
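
params.py repeats the same pattern for the NameNode, ResourceManager, Hive, WebHCat, HBase, Oozie and Spark hosts: clusterHostInfo may hand back a list or a single value, and the first entry is used when it is a list. A small helper capturing that pattern (hypothetical, shown only to make the convention explicit):

def first_host(hosts):
    # clusterHostInfo entries may be a list of hostnames, a single string, or None.
    if isinstance(hosts, list):
        return hosts[0] if hosts else None
    return hosts

# e.g. namenode_host = first_host(default("/clusterHostInfo/namenode_host", None))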

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/service_check.py
new file mode 100755
index 0000000..ec915d4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/service_check.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+import os
+
+
+class KnoxServiceCheck(Script):
+  def service_check(self, env):
+    pass
+
+
+#
+#@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+##class KnoxServiceCheckWindows(KnoxServiceCheck):
+#  def service_check(self, env):
+#    import params
+#    env.set_params(params)
+#
+#    temp_dir = os.path.join(os.path.dirname(params.knox_home), "temp")
+#    validateKnoxFileName = "validateKnoxStatus.py"
+#    validateKnoxFilePath = os.path.join(temp_dir, validateKnoxFileName)
+#    python_executable = sys.executable
+#    validateStatusCmd = "%s %s -p %s -n %s" % (python_executable, validateKnoxFilePath, params.knox_host_port, params.knox_host_name)
+#
+#    print "Test connectivity to knox server"
+#
+#    File(validateKnoxFilePath,
+#         content=StaticFile(validateKnoxFileName)
+#    )
+#
+#    Execute(validateStatusCmd,
+#            tries=3,
+#            try_sleep=5,
+#            timeout=5,
+#            logoutput=True
+#    )
+
+
+#@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class KnoxServiceCheckDefault(KnoxServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    validateKnoxFileName = "validateKnoxStatus.py"
+    validateKnoxFilePath = format("{tmp_dir}/{validateKnoxFileName}")
+    python_executable = sys.executable
+    validateStatusCmd = format("{python_executable} {validateKnoxFilePath} -p {knox_host_port} -n {knox_host_name}")
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+      smoke_cmd = format("{kinit_cmd} {validateStatusCmd}")
+    else:
+      smoke_cmd = validateStatusCmd
+
+    print "Test connectivity to knox server"
+
+    File(validateKnoxFilePath,
+         content=StaticFile(validateKnoxFileName),
+         mode=0755
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            user=params.smokeuser,
+            timeout=5,
+            logoutput=True
+    )
+
+
+if __name__ == "__main__":
+  KnoxServiceCheckDefault().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/status_params.py
new file mode 100755
index 0000000..fe359e8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/status_params.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+knox_pid_dir = config['configurations']['knox-env']['knox_pid_dir']
+knox_pid_file = format("{knox_pid_dir}/gateway.pid")
+ldap_pid_file = format("{knox_pid_dir}/ldap.pid")
+#if OSCheck.is_windows_family():
+#  knox_gateway_win_service_name = "gateway"
+#  knox_ldap_win_service_name = "ldap"
+#else:
+knox_conf_dir = '/etc/knox/conf'
+#  if Script.is_stack_greater_or_equal("2.2"):
+knox_conf_dir = '/usr/iop/current/knox-server/conf'
+#  knox_pid_dir = config['configurations']['knox-env']['knox_pid_dir']
+#  knox_pid_file = format("{knox_pid_dir}/gateway.pid")
+#  ldap_pid_file = format("{knox_pid_dir}/ldap.pid")
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+if security_enabled:
+   knox_keytab_path = config['configurations']['knox-env']['knox_keytab_path']
+   knox_principal_name = config['configurations']['knox-env']['knox_principal_name']
+else:
+   knox_keytab_path = None
+   knox_principal_name = None
+
+hostname = config['hostname'].lower()
+knox_user = default("/configurations/knox-env/knox_user", "knox")
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+temp_dir = Script.get_tmp_dir()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/upgrade.py
new file mode 100755
index 0000000..66115d3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/scripts/upgrade.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import tarfile
+import tempfile
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions import tar_archive
+
+BACKUP_TEMP_DIR = "knox-upgrade-backup"
+BACKUP_DATA_ARCHIVE = "knox-data-backup.tar"
+BACKUP_CONF_ARCHIVE = "knox-conf-backup.tar"
+
+def backup_data():
+  """
+  Backs up the knox data as part of the upgrade process.
+  :return: Returns the path to the absolute backup directory.
+  """
+  Logger.info('Backing up Knox data directory before upgrade...')
+  directoryMappings = _get_directory_mappings()
+
+  Logger.info("Directory mappings to backup: {0}".format(str(directoryMappings)))
+
+  absolute_backup_dir = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
+  if not os.path.isdir(absolute_backup_dir):
+    os.makedirs(absolute_backup_dir)
+
+  for directory in directoryMappings:
+    if not os.path.isdir(directory):
+      raise Fail("Unable to backup missing directory {0}".format(directory))
+
+    archive = os.path.join(absolute_backup_dir, directoryMappings[directory])
+    Logger.info('Compressing {0} to {1}'.format(directory, archive))
+
+    if os.path.exists(archive):
+      os.remove(archive)
+
+    # backup the directory, following symlinks instead of including them
+    tar_archive.archive_directory_dereference(archive, directory)
+
+  return absolute_backup_dir
+
+
+def _get_directory_mappings():
+  """
+  Gets a dictionary of directory to archive name that represents the
+  directories that need to be backed up and their output tarball archive targets
+  :return:  the dictionary of directory to tarball mappings
+  """
+  import params
+
+  return { params.ibm_distribution_knox_var + "/lib/knox/data/security" : BACKUP_DATA_ARCHIVE,
+           params.knox_conf_dir + "/": BACKUP_CONF_ARCHIVE} # the trailing "/" is important here so as to not include the "conf" folder itself
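
tar_archive.archive_directory_dereference is expected to archive the directory contents while following symlinks, so the backup contains real files rather than links. A rough standard-library sketch of that behavior (not the actual Ambari implementation):

import tarfile

def archive_directory_dereference_sketch(archive, directory):
    # Tar up the contents of `directory`, dereferencing symlinks; archiving the
    # contents (arcname=".") rather than the folder itself is in the spirit of the
    # trailing-"/" note in _get_directory_mappings above.
    with tarfile.open(archive, "w", dereference=True) as tar:
        tar.add(directory, arcname=".")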

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/templates/krb5JAASLogin.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/templates/krb5JAASLogin.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/templates/krb5JAASLogin.conf.j2
new file mode 100755
index 0000000..13191b8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KNOX/package/templates/krb5JAASLogin.conf.j2
@@ -0,0 +1,29 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+com.sun.security.jgss.initiate {
+com.sun.security.auth.module.Krb5LoginModule required
+renewTGT=true
+doNotPrompt=true
+useKeyTab=true
+keyTab="{{knox_keytab_path}}"
+principal="{{knox_principal_name}}"
+isInitiator=true
+storeKey=true
+useTicketCache=true
+client=true;
+};
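
The {{knox_keytab_path}} and {{knox_principal_name}} placeholders above are filled from params.py when knox.py materializes the template through TemplateConfig. As a standalone illustration of the substitution, assuming the jinja2 package is available and using example values:

from jinja2 import Template

template_text = open("krb5JAASLogin.conf.j2").read()
print(Template(template_text).render(
    knox_keytab_path="/etc/security/keytabs/knox.service.keytab",  # example value
    knox_principal_name="knox/host.example.com@EXAMPLE.COM"))      # example value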

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/alerts.json
new file mode 100755
index 0000000..32849b3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/OOZIE/alerts.json
@@ -0,0 +1,45 @@
+{
+  "OOZIE": {
+    "service": [],
+    "OOZIE_SERVER": [
+      {
+        "name": "oozie_server_webui",
+        "label": "Oozie Server Web UI",
+        "description": "This host-level alert is triggered if the Oozie server Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{oozie-site/oozie.base.url}}/?user.name={{oozie-env/oozie_user}}",
+            "kerberos_keytab": "{{oozie-site/oozie.authentication.kerberos.keytab}}",
+            "kerberos_principal": "{{oozie-site/oozie.authentication.kerberos.principal}}",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.3f}s"
+            },
+            "warning":{
+              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
+            },
+            "critical": {
+              "text": "Connection failed to {1} ({3})"
+            }
+          }
+        }
+      },
+      {
+        "name": "oozie_server_status",
+        "label": "Oozie Server Status",
+        "description": "This host-level alert is triggered if the Oozie server cannot be determined to be up and responding to client requests.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "OOZIE/4.2.0.4.1/package/alerts/alert_check_oozie_server.py"
+        }
+      }
+    ]
+  }
+}


http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/YARN_metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/YARN_metrics.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/YARN_metrics.json
new file mode 100755
index 0000000..a66bb34
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/YARN_metrics.json
@@ -0,0 +1,3486 @@
+{
+  "NODEMANAGER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/load/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_buffered": {
+              "metric": "mem_buffered",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/read_count": {
+              "metric": "read_count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/write_count": {
+              "metric": "write_count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/read_bytes": {
+              "metric": "read_bytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/write_bytes": {
+              "metric": "write_bytes",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/read_time": {
+              "metric": "read_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/write_time": {
+              "metric": "write_time",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/disk/read_bps":{
+              "metric":"read_bps",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/disk/write_bps":{
+              "metric":"write_bps",
+              "pointInTime":true,
+              "temporal":true,
+              "amsHostMetric":true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputsFailed": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputsFailed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.rpc.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedContainers": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedGB": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedGB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AvailableGB": {
+              "metric": "yarn.NodeManagerMetrics.AvailableGB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedVCores": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AvailableVCores": {
+              "metric": "yarn.NodeManagerMetrics.AvailableVCores",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/BadLocalDirs": {
+              "metric": "yarn.NodeManagerMetrics.BadLocalDirs",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/BadLogDirs": {
+              "metric": "yarn.NodeManagerMetrics.BadLogDirs",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainerLaunchDurationAvgTime": {
+              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainerLaunchDurationNumOps": {
+              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersCompleted": {
+              "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersFailed": {
+              "metric": "yarn.NodeManagerMetrics.ContainersFailed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersIniting": {
+              "metric": "yarn.NodeManagerMetrics.ContainersIniting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersKilled": {
+              "metric": "yarn.NodeManagerMetrics.ContainersKilled",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersLaunched": {
+              "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersRunning": {
+              "metric": "yarn.NodeManagerMetrics.ContainersRunning",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/GoodLocalDirsDiskUtilizationPerc": {
+              "metric": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/GoodLogDirsDiskUtilizationPerc": {
+              "metric": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputsOK": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputsOK",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleConnections": {
+              "metric": "mapred.ShuffleMetrics.ShuffleConnections",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputBytes": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.rpc.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountCopy": {
+              "metric": "jvm.JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisCopy": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcClientBackoff": {
+              "metric": "rpc.rpc.RpcClientBackoff",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/HeartbeatAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.HeartbeatAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/HeartbeatNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.HeartbeatNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StartContainersAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.StartContainersAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StartContainersNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.StartContainersNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StopContainersAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.StopContainersAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StopContainersNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.StopContainersNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/cpu/cpu_idle": {
+              "metric": "cpu_idle",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_nice": {
+              "metric": "cpu_nice",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_system": {
+              "metric": "cpu_system",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_user": {
+              "metric": "cpu_user",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/cpu/cpu_wio": {
+              "metric": "cpu_wio",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/disk_free": {
+              "metric": "disk_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/disk_total": {
+              "metric": "disk_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load/load_fifteen": {
+              "metric": "load_fifteen",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load/load_five": {
+              "metric": "load_five",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/load/load_one": {
+              "metric": "load_one",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_buffered": {
+              "metric": "mem_buffered",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_cached": {
+              "metric": "mem_cached",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_free": {
+              "metric": "mem_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_shared": {
+              "metric": "mem_shared",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/mem_total": {
+              "metric": "mem_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/memory/swap_free": {
+              "metric": "swap_free",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/bytes_in": {
+              "metric": "bytes_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/bytes_out": {
+              "metric": "bytes_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/pkts_in": {
+              "metric": "pkts_in",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/network/pkts_out": {
+              "metric": "pkts_out",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/process/proc_run": {
+              "metric": "proc_run",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/process/proc_total": {
+              "metric": "proc_total",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/read_count": {
+              "metric": "read_count",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/write_count": {
+              "metric": "write_count",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/read_bytes": {
+              "metric": "read_bytes",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/write_bytes": {
+              "metric": "write_bytes",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/read_time": {
+              "metric": "read_time",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/disk/write_time": {
+              "metric": "write_time",
+              "pointInTime": true,
+              "temporal": true,
+              "amsHostMetric": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputsFailed": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputsFailed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "rpc.rpc.RpcAuthorizationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedContainers": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedGB": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedGB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AvailableGB": {
+              "metric": "yarn.NodeManagerMetrics.AvailableGB",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AllocatedVCores": {
+              "metric": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/AvailableVCores": {
+              "metric": "yarn.NodeManagerMetrics.AvailableVCores",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/BadLocalDirs": {
+              "metric": "yarn.NodeManagerMetrics.BadLocalDirs",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/BadLogDirs": {
+              "metric": "yarn.NodeManagerMetrics.BadLogDirs",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainerLaunchDurationAvgTime": {
+              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainerLaunchDurationNumOps": {
+              "metric": "yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersCompleted": {
+              "metric": "yarn.NodeManagerMetrics.ContainersCompleted",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersFailed": {
+              "metric": "yarn.NodeManagerMetrics.ContainersFailed",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersIniting": {
+              "metric": "yarn.NodeManagerMetrics.ContainersIniting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersKilled": {
+              "metric": "yarn.NodeManagerMetrics.ContainersKilled",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersLaunched": {
+              "metric": "yarn.NodeManagerMetrics.ContainersLaunched",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ContainersRunning": {
+              "metric": "yarn.NodeManagerMetrics.ContainersRunning",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/GoodLocalDirsDiskUtilizationPerc": {
+              "metric": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/GoodLogDirsDiskUtilizationPerc": {
+              "metric": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputsOK": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputsOK",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleConnections": {
+              "metric": "mapred.ShuffleMetrics.ShuffleConnections",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/mapred/ShuffleOutputBytes": {
+              "metric": "mapred.ShuffleMetrics.ShuffleOutputBytes",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "rpc.rpc.RpcAuthenticationFailures",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountCopy": {
+              "metric": "jvm.JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisCopy": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcTimeMillisMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcClientBackoff": {
+              "metric": "rpc.rpc.RpcClientBackoff",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetContainerStatusesNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetContainerStatusesNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/HeartbeatAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.HeartbeatAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/HeartbeatNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.HeartbeatNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StartContainersAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.StartContainersAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StartContainersNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.StartContainersNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StopContainersAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.StopContainersAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/StopContainersNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.StopContainersNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.CallQueueLength",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogInfo",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logError": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogError",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.NumOpenConnections",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogFatal",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationSuccesses": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthenticationFailures": {
+              "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTerminated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/GoodLocalDirsDiskUtilizationPerc": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/GoodLogDirsDiskUtilizationPerc": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/AllocatedGB": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedGB",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/AllocatedVCores": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedVCores",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/BadLocalDirs": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.BadLocalDirs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/BadLogDirs": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.BadLogDirs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/ContainersFailed": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.ContainersFailed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/nodemanager/AllocatedContainers": {
+              "metric": "Hadoop:service=NodeManager,name=NodeManagerMetrics.AllocatedContainers",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ]
+  },
+  "RESOURCEMANAGER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+              "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "jvm.JvmMetrics.ThreadsRunnable",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+              "metric": "yarn.ClusterMetrics.NumRebootedNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "jvm.JvmMetrics.ThreadsNew",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumLostNMs": {
+              "metric": "yarn.ClusterMetrics.NumLostNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "rpc.rpc.SentBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+              "metric": "yarn.ClusterMetrics.NumActiveNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "jvm.JvmMetrics.LogWarn",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "jvm.JvmMetrics.GcCount",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "rpc.rpc.ReceivedBytes",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/AllocateNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcCountMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ActiveApplications": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ActiveApplications",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AMResourceLimitMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AMResourceLimitMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AMResourceLimitVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AMResourceLimitVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ActiveUsers": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ActiveUsers",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AggregateContainersAllocated": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AggregateContainersAllocated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AggregateContainersReleased": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AggregateContainersReleased",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppAttemptFirstContainerAllocationDelayAvgTime": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppAttemptFirstContainerAllocationDelayAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppAttemptFirstContainerAllocationDelayNumOps": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppAttemptFirstContainerAllocationDelayNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AvailableVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).PendingVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/UsedAMResourceMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).UsedAMResourceMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/UsedAMResourceVCores": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).UsedAMResourceVCores",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_0": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).running_0",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_1440": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).running_1440",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_300": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).running_300",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/running_60": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).running_60",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+              "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "rpc.rpc.RpcQueueTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/NumOpenConnections": {
+              "metric": "rpc.rpc.NumOpenConnections",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsWaiting": {
+              "metric": "jvm.JvmMetrics.ThreadsWaiting",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginSuccess_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillisCopy": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillis": {
+              "metric": "jvm.JvmMetrics.GcTimeMillis",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/memMaxM": {
+              "metric": "jvm.JvmMetrics.MemMaxM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/threadsTerminated": {
+              "metric": "jvm.JvmMetrics.ThreadsTerminated",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/AllocateAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/memNonHeapCommittedM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/GetApplicationReportNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/callQueueLen": {
+              "metric": "rpc.rpc.CallQueueLength",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logInfo": {
+              "metric": "jvm.JvmMetrics.LogInfo",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_num_ops": {
+              "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_num_ops": {
+              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/SubmitApplicationAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/GetNewApplicationNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/gcCountCopy": {
+              "metric": "jvm.JvmMetrics.GcCountCopy",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/logError": {
+              "metric": "jvm.JvmMetrics.LogError",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/ugi/loginFailure_avg_time": {
+              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/SubmitApplicationNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/gcTimeMillisMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpcAuthorizationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/logFatal": {
+              "metric": "jvm.JvmMetrics.LogFatal",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpc/RpcProcessingTime_avg_time": {
+              "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/GetApplicationReportAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/GetNewApplicationAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
+              "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/GcCountMarkSweepCompact": {
+              "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
+              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/JvmMetrics/ThreadsBlocked": {
+              "metric": "jvm.JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/DroppedPubAll": {
+              "metric": "metricssystem.MetricsSystem.DroppedPubAll",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSinks": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumActiveSources": {
+              "metric": "metricssystem.MetricsSystem.NumActiveSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSinks": {
+              "metric": "metricssystem.MetricsSystem.NumAllSinks",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/NumAllSources": {
+              "metric": "metricssystem.MetricsSystem.NumAllSources",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishAvgTime": {
+              "metric": "metricssystem.MetricsSystem.PublishAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/PublishNumOps": {
+              "metric": "metricssystem.MetricsSystem.PublishNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineAvgTime": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineDropped": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineDropped",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineNumOps": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/Sink_timelineQsize": {
+              "metric": "metricssystem.MetricsSystem.Sink_timelineQsize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotAvgTime": {
+              "metric": "metricssystem.MetricsSystem.SnapshotAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/metricssystem/MetricsSystem/SnapshotNumOps": {
+              "metric": "metricssystem.MetricsSystem.SnapshotNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcAuthenticationFailures": {
+              "metric": "rpc.rpc.RpcAuthenticationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcAuthenticationSuccesses": {
+              "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcAuthorizationFailures": {
+              "metric": "rpc.rpc.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpc/rpc/RpcClientBackoff": {
+              "metric": "rpc.rpc.RpcClientBackoff",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/AllocateNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetClusterMetricsAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetClusterMetricsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetClusterMetricsNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetClusterMetricsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetClusterNodesAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetClusterNodesAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetClusterNodesNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetClusterNodesNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetQueueInfoAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetQueueInfoAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetQueueInfoNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetQueueInfoNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetQueueUserAclsAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.GetQueueUserAclsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/GetQueueUserAclsNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.GetQueueUserAclsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/NodeHeartbeatNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/RegisterNodeManagerAvgTime": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterNodeManagerAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/rpcdetailed/rpcdetailed/RegisterNodeManagerNumOps": {
+              "metric": "rpcdetailed.rpcdetailed.RegisterNodeManagerNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsAvgTime": {
+              "metric": "ugi.UgiMetrics.GetGroupsAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/ugi/UgiMetrics/GetGroupsNumOps": {
+              "metric": "ugi.UgiMetrics.GetGroupsNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/AMLaunchDelayAvgTime": {
+              "metric": "yarn.ClusterMetrics.AMLaunchDelayAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/AMLaunchDelayNumOps": {
+              "metric": "yarn.ClusterMetrics.AMLaunchDelayNumOps",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/AMRegisterDelayAvgTime": {
+              "metric": "yarn.ClusterMetrics.AMRegisterDelayAvgTime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/yarn/ClusterMetrics/AMRegisterDelayNumOps": {
+              "metric": "yarn.ClusterMetrics.AMRegisterDelayNumOps",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "default": {
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/rm_metrics/cluster/rebootedNMcount": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memHeapCommittedM": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsRunnable": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsNew": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/runtime/StartTime": {
+              "metric": "java.lang:type=Runtime.StartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/rpcAuthorizationFailures": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumLostNMs": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/ugi/loginSuccess_avg_time": {
+              "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "ServiceComponentInfo/StartTime": {
+              "metric": "java.lang:type=Runtime.StartTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_avg_time": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/SentBytes": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+              "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memNonHeapUsedM": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/logWarn": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsTimedWaiting": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/gcCount": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/ReceivedBytes": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/threadsBlocked": {
+              "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
+              "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/rpc/RpcQueueTime_num_ops": {
+              "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/yarn

<TRUNCATED>
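
The parameterized keys in the metrics above ($1.replaceAll(...)) are resolved
against the first capture group of the corresponding "metric" regex. A minimal
sketch of that expansion for the ganglia form, assuming a queue named
root.default (the class below is illustrative only, not Ambari's actual
resolver):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/** Illustrative sketch, not Ambari code: shows how the key
 *  metrics/yarn/Queue/$1.replaceAll("([.])","/")/AllocatedMB
 *  is derived from the metric regex
 *  yarn.QueueMetrics.Queue=(.+).AllocatedMB
 *  for an assumed queue named root.default. */
public class QueueMetricKeyDemo {
    public static void main(String[] args) {
        String emitted = "yarn.QueueMetrics.Queue=root.default.AllocatedMB";
        Matcher m = Pattern
            .compile("yarn.QueueMetrics.Queue=(.+).AllocatedMB")
            .matcher(emitted);
        if (m.matches()) {
            String queue = m.group(1);               // "root.default"
            String key = "metrics/yarn/Queue/"
                + queue.replaceAll("([.])", "/")     // "root/default"
                + "/AllocatedMB";
            System.out.println(key); // metrics/yarn/Queue/root/default/AllocatedMB
        }
    }
}

The jmx form in the second block works the same way, except the capture group
from a bean name looks like ",q0=root,q1=default", so those keys apply
replaceAll(",q(\\d+)=","/") followed by substring(1) to arrive at the same
root/default path.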

[29/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.14.0.postgres.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.14.0.postgres.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.14.0.postgres.sql
new file mode 100755
index 0000000..5e238b2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.14.0.postgres.sql
@@ -0,0 +1,1541 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_OLD" (
+    "SD_ID" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000),
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(128)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: MASTER_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE  "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+CREATE TABLE  "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
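+--
+-- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+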
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for FUNCS
+--
+CREATE TABLE "FUNCS" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "CLASS_NAME" VARCHAR(4000),
+  "CREATE_TIME" INTEGER NOT NULL,
+  "DB_ID" BIGINT,
+  "FUNC_NAME" VARCHAR(128),
+  "FUNC_TYPE" INTEGER NOT NULL,
+  "OWNER_NAME" VARCHAR(128),
+  "OWNER_TYPE" VARCHAR(10),
+  PRIMARY KEY ("FUNC_ID")
+);
+
+--
+-- Table structure for FUNC_RU
+--
+CREATE TABLE "FUNC_RU" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "RESOURCE_TYPE" INTEGER NOT NULL,
+  "RESOURCE_URI" VARCHAR(4000),
+  "INTEGER_IDX" INTEGER NOT NULL,
+  PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
+);
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+--
+-- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
+
+--
+-- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
+
+--
+-- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+-- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNCS"
+    ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
+
+-- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNC_RU"
+    ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+--
+-- PostgreSQL database dump complete
+--
+
+------------------------------
+-- Transaction and lock tables
+-- These tables are not part of the package jdo schema, so if you regenerate this file you must manually add the following section back.
+------------------------------
+CREATE TABLE "txns" (
+  "txn_id" bigint PRIMARY KEY,
+  "txn_state" char(1) NOT NULL,
+  "txn_started" bigint NOT NULL,
+  "txn_last_heartbeat" bigint NOT NULL,
+  "txn_user" varchar(128) NOT NULL,
+  "txn_host" varchar(128) NOT NULL
+);
+
+CREATE TABLE "txn_components" (
+  "tc_txnid" bigint REFERENCES "txns" ("txn_id"),
+  "tc_database" varchar(128) NOT NULL,
+  "tc_table" varchar(128),
+  "tc_partition" varchar(767) DEFAULT NULL
+);
+
+CREATE TABLE "completed_txn_components" (
+  "ctc_txnid" bigint,
+  "ctc_database" varchar(128) NOT NULL,
+  "ctc_table" varchar(128),
+  "ctc_partition" varchar(767)
+);
+
+CREATE TABLE "next_txn_id" (
+  "ntxn_next" bigint NOT NULL
+);
+INSERT INTO "next_txn_id" VALUES(1);
+
+CREATE TABLE "hive_locks" (
+  "hl_lock_ext_id" bigint NOT NULL,
+  "hl_lock_int_id" bigint NOT NULL,
+  "hl_txnid" bigint,
+  "hl_db" varchar(128) NOT NULL,
+  "hl_table" varchar(128),
+  "hl_partition" varchar(767) DEFAULT NULL,
+  "hl_lock_state" char(1) NOT NULL,
+  "hl_lock_type" char(1) NOT NULL,
+  "hl_last_heartbeat" bigint NOT NULL,
+  "hl_acquired_at" bigint,
+  "hl_user" varchar(128) NOT NULL,
+  "hl_host" varchar(128) NOT NULL,
+  PRIMARY KEY("hl_lock_ext_id", "hl_lock_int_id")
+);
+
+CREATE INDEX "hl_txnid_index" ON "hive_locks" USING hash ("hl_txnid");
+
+CREATE TABLE "next_lock_id" (
+  "nl_next" bigint NOT NULL
+);
+INSERT INTO "next_lock_id" VALUES(1);
+
+CREATE TABLE "compaction_queue" (
+  "cq_id" bigint PRIMARY KEY,
+  "cq_database" varchar(128) NOT NULL,
+  "cq_table" varchar(128) NOT NULL,
+  "cq_partition" varchar(767),
+  "cq_state" char(1) NOT NULL,
+  "cq_type" char(1) NOT NULL,
+  "cq_worker_id" varchar(128),
+  "cq_start" bigint,
+  "cq_run_as" varchar(128)
+);
+
+CREATE TABLE "next_compaction_queue_id" (
+  "ncq_next" bigint NOT NULL
+);
+INSERT INTO "next_compaction_queue_id" VALUES(1);
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.14.0', 'Hive release version 0.14.0');

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql
new file mode 100755
index 0000000..2a4231a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql
@@ -0,0 +1,165 @@
+SELECT 'Upgrading MetaStore schema from 0.12.0 to 0.13.0' AS Status from dual;
+
+-- 15-HIVE-5700.oracle.sql
+-- Normalize the date partition column values as best we can. No schema changes.
+
+CREATE FUNCTION hive13_to_date(date_str IN VARCHAR2)
+RETURN DATE IS dt DATE;
+BEGIN
+  dt := TO_DATE(date_str, 'YYYY-MM-DD');
+  RETURN dt;
+EXCEPTION
+  WHEN others THEN RETURN null;
+END;
+/
+
+MERGE INTO PARTITION_KEY_VALS
+USING (
+  SELECT SRC.PART_ID as IPART_ID, SRC.INTEGER_IDX as IINTEGER_IDX,
+     NVL(TO_CHAR(hive13_to_date(PART_KEY_VAL),'YYYY-MM-DD'), PART_KEY_VAL) as NORM
+  FROM PARTITION_KEY_VALS SRC
+    INNER JOIN PARTITIONS ON SRC.PART_ID = PARTITIONS.PART_ID
+    INNER JOIN PARTITION_KEYS ON PARTITION_KEYS.TBL_ID = PARTITIONS.TBL_ID
+      AND PARTITION_KEYS.INTEGER_IDX = SRC.INTEGER_IDX AND PARTITION_KEYS.PKEY_TYPE = 'date'
+) ON (IPART_ID = PARTITION_KEY_VALS.PART_ID AND IINTEGER_IDX = PARTITION_KEY_VALS.INTEGER_IDX)
+WHEN MATCHED THEN UPDATE SET PART_KEY_VAL = NORM;
+
+DROP FUNCTION hive13_to_date;
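For intuition: the temporary function returns NULL for any value TO_DATE cannot
parse, and the NVL in the MERGE then keeps the original string, so only
well-formed date partition values are rewritten. Hypothetical examples:

  -- '2014-1-5'   is normalized to '2014-01-05'
  -- 'not-a-date' is left unchanged (hive13_to_date returns NULL, NVL keeps the original)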
+
+-- 16-HIVE-6386.oracle.sql
+ALTER TABLE DBS ADD OWNER_NAME VARCHAR2(128);
+ALTER TABLE DBS ADD OWNER_TYPE VARCHAR2(10);
+
+-- 17-HIVE-6458.oracle.sql
+CREATE TABLE FUNCS (
+  FUNC_ID NUMBER NOT NULL,
+  CLASS_NAME VARCHAR2(4000),
+  CREATE_TIME NUMBER(10) NOT NULL,
+  DB_ID NUMBER,
+  FUNC_NAME VARCHAR2(128),
+  FUNC_TYPE NUMBER(10) NOT NULL,
+  OWNER_NAME VARCHAR2(128),
+  OWNER_TYPE VARCHAR2(10)
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+CREATE TABLE FUNC_RU (
+  FUNC_ID NUMBER NOT NULL,
+  RESOURCE_TYPE NUMBER(10) NOT NULL,
+  RESOURCE_URI VARCHAR2(4000),
+  INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+-- 18-HIVE-6757.oracle.sql
+UPDATE SDS
+  SET INPUT_FORMAT = 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
+WHERE
+  INPUT_FORMAT= 'parquet.hive.DeprecatedParquetInputFormat' or
+  INPUT_FORMAT = 'parquet.hive.MapredParquetInputFormat'
+;
+
+UPDATE SDS
+  SET OUTPUT_FORMAT = 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
+WHERE
+  OUTPUT_FORMAT = 'parquet.hive.DeprecatedParquetOutputFormat'  or
+  OUTPUT_FORMAT = 'parquet.hive.MapredParquetOutputFormat'
+;
+
+UPDATE SERDES
+  SET SLIB='org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
+WHERE
+  SLIB = 'parquet.hive.serde.ParquetHiveSerDe'
+;
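A hypothetical post-upgrade check (not part of the script): after the three
updates above, nothing should still reference the old bundled parquet classes:

  SELECT COUNT(*) FROM SDS
   WHERE INPUT_FORMAT LIKE 'parquet.hive.%' OR OUTPUT_FORMAT LIKE 'parquet.hive.%';
  SELECT COUNT(*) FROM SERDES WHERE SLIB = 'parquet.hive.serde.ParquetHiveSerDe';
  -- both counts should be 0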
+
+-- hive-txn-schema-0.13.0.oracle.sql
+
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+--
+-- Tables for transaction management
+--
+
+CREATE TABLE TXNS (
+  TXN_ID NUMBER(19) PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED NUMBER(19) NOT NULL,
+  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
+  TC_DATABASE VARCHAR2(128) NOT NULL,
+  TC_TABLE VARCHAR2(128),
+  TC_PARTITION VARCHAR2(767) NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID NUMBER(19),
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+  HL_TXNID NUMBER(19),
+  HL_DB VARCHAR2(128) NOT NULL,
+  HL_TABLE VARCHAR2(128),
+  HL_PARTITION VARCHAR2(767),
+  HL_LOCK_STATE CHAR(1) NOT NULL,
+  HL_LOCK_TYPE CHAR(1) NOT NULL,
+  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  HL_ACQUIRED_AT NUMBER(19),
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+);
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID NUMBER(19) PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START NUMBER(19),
+  CQ_RUN_AS varchar(128)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+
+UPDATE VERSION SET SCHEMA_VERSION='0.13.0', VERSION_COMMENT='Hive release version 0.13.0' where VER_ID=1;
+SELECT 'Finished upgrading MetaStore schema from 0.12.0 to 0.13.0' AS Status from dual;

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/upgrade-0.13.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/upgrade-0.13.0.oracle.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/upgrade-0.13.0.oracle.sql
new file mode 100755
index 0000000..b34f406
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/upgrade-0.13.0.oracle.sql
@@ -0,0 +1,38 @@
+ALTER TABLE TXNS MODIFY (
+  TXN_ID NUMBER(19),
+  TXN_STARTED NUMBER(19),
+  TXN_LAST_HEARTBEAT NUMBER(19)
+);
+
+ALTER TABLE TXN_COMPONENTS MODIFY (
+  TC_TXNID NUMBER(19)
+);
+
+ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY (
+  CTC_TXNID NUMBER(19)
+);
+
+ALTER TABLE NEXT_TXN_ID MODIFY (
+  NTXN_NEXT NUMBER(19)
+);
+
+ALTER TABLE HIVE_LOCKS MODIFY (
+  HL_LOCK_EXT_ID NUMBER(19),
+  HL_LOCK_INT_ID NUMBER(19),
+  HL_TXNID NUMBER(19),
+  HL_LAST_HEARTBEAT NUMBER(19),
+  HL_ACQUIRED_AT NUMBER(19)
+);
+
+ALTER TABLE NEXT_LOCK_ID MODIFY (
+  NL_NEXT NUMBER(19)
+);
+
+ALTER TABLE COMPACTION_QUEUE MODIFY (
+  CQ_ID NUMBER(19),
+  CQ_START NUMBER(19)
+);
+
+ALTER TABLE NEXT_COMPACTION_QUEUE_ID MODIFY (
+  NCQ_NEXT NUMBER(19)
+);
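These MODIFYs widen the transaction and lock id columns to NUMBER(19) so they
can hold the full 64-bit id range. A hypothetical way to confirm the result,
using the standard Oracle data dictionary:

  SELECT TABLE_NAME, COLUMN_NAME, DATA_PRECISION
    FROM USER_TAB_COLUMNS
   WHERE TABLE_NAME IN ('TXNS', 'HIVE_LOCKS') AND DATA_PRECISION = 19;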

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/kerberos.json
new file mode 100755
index 0000000..932b71b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/kerberos.json
@@ -0,0 +1,112 @@
+{
+  "services": [
+    {
+      "name": "HIVE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hive-site": {
+            "hive.metastore.sasl.enabled": "true",
+            "hive.security.authorization.enabled": "true",
+            "hive.server2.authentication": "KERBEROS"
+          }
+        },
+        {
+          "webhcat-site": {
+            "templeton.kerberos.secret": "secret",
+            "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://${clusterHostInfo/hive_metastore_host}:9083,hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host}"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HIVE_METASTORE",
+          "identities": [
+            {
+              "name": "hive_metastore_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hive-site/hive.metastore.kerberos.principal",
+                "local_username": "${hive-env/hive_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.service.keytab",
+                "owner": {
+                  "name": "${hive-env/hive_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hive-site/hive.metastore.kerberos.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER",
+          "identities": [
+            {
+              "name": "hive_server_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
+                "local_username": "${hive-env/hive_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.service.keytab",
+                "owner": {
+                  "name": "${hive-env/hive_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hive-site/hive.server2.authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "WEBHCAT_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "webhcat-site/templeton.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "webhcat-site/templeton.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml
new file mode 100755
index 0000000..44e63e7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/metainfo.xml
@@ -0,0 +1,327 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <displayName>Hive</displayName>
+      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+      <version>1.2.1</version>
+      <components>
+
+        <component>
+          <name>HIVE_METASTORE</name>
+          <displayName>Hive Metastore</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>HIVE/HIVE_SERVER</co-locate>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/hive_metastore.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+
+        <component>
+          <name>HIVE_SERVER</name>
+          <displayName>HiveServer2</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+
+            <!--dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency-->
+
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hive_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+
+        <component>
+          <name>WEBHCAT_SERVER</name>
+          <displayName>WebHCat Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <clientsToUpdateConfigs>
+            <client>HCAT</client>
+          </clientsToUpdateConfigs>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>PIG/PIG</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SQOOP/SQOOP</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/webhcat_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+
+        <component>
+          <name>MYSQL_SERVER</name>
+          <displayName>MySQL Server</displayName>
+          <category>MASTER</category>
+          <cardinality>0-1</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
+          <commandScript>
+            <script>scripts/mysql_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>CLEAN</name>
+                <commandScript>
+                  <script>scripts/mysql_server.py</script>
+                  <scriptType>PYTHON</scriptType>
+                  <timeout>600</timeout>
+                </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HIVE_CLIENT</name>
+          <displayName>Hive Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/hive_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hive-site.xml</fileName>
+              <dictionaryName>hive-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-env.sh</fileName>
+              <dictionaryName>hive-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-log4j.properties</fileName>
+              <dictionaryName>hive-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-exec-log4j.properties</fileName>
+              <dictionaryName>hive-exec-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>HCAT</name>
+          <displayName>HCat Client</displayName>
+          <category>CLIENT</category>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/hcat_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>hcat-env.sh</fileName>
+              <dictionaryName>hcat-env</dictionaryName>
+            </configFile>
+          </configFiles>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hive</name>
+            </package>
+            <package>
+              <!--name>hcatalog</name -->
+              <name>hive-hcatalog</name>
+            </package>
+            <package>
+              <!--  name>webhcat-tar-hive</name-->
+              <name>hive-webhcat</name>
+            </package>
+            <!--package>
+              <name>webhcat-tar-pig</name>
+            </package-->
+            <package>
+              <name>mysql-connector-java</name>
+              <condition>should_install_mysl_connector</condition>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat6,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+              <condition>should_install_mysql</condition>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>YARN</service>
+        <!--service>TEZ</service-->
+        <service>PIG</service>
+        <service>SQOOP</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>hive-log4j</config-type>
+        <config-type>hive-exec-log4j</config-type>
+        <config-type>hive-env</config-type>
+        <!--config-type>tez-site</config-type-->
+        <config-type>webhcat-site</config-type>
+        <config-type>webhcat-env</config-type>
+        <config-type>mapred-site</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_hive_metastore.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_hive_metastore.py
new file mode 100755
index 0000000..9d4d6a2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_hive_metastore.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+import socket
+import time
+
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.core.resources import Execute
+from ambari_commons.os_check import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+OK_MESSAGE = "Metastore OK - Hive command took {0:.3f}s"
+CRITICAL_MESSAGE = "Metastore on {0} failed ({1})"
+
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
+SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+HIVE_METASTORE_URIS_KEY = '{{hive-site/hive.metastore.uris}}'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+PERCENT_WARNING = 200
+PERCENT_CRITICAL = 200
+
+# default keytab location
+SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
+SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
+
+# default smoke principal
+SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
+SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
+
+# default smoke user
+SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
+SMOKEUSER_DEFAULT = 'ambari-qa'
+
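+# Prefer the current IOP-managed conf/bin locations; execute() falls back to
+# the legacy paths below when the new conf dir does not exist on this host.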
+HIVE_CONF_DIR = '/usr/iop/current/hive-metastore/conf/conf.server'
+HIVE_CONF_DIR_LEGACY = '/etc/hive/conf.server'
+
+HIVE_BIN_DIR = '/usr/iop/current/hive-metastore/bin'
+HIVE_BIN_DIR_LEGACY = '/usr/lib/hive/bin'
+
+
+HADOOPUSER_KEY = '{{cluster-env/hadoop.user.name}}'
+HADOOPUSER_DEFAULT = 'hadoop'
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (SECURITY_ENABLED_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_PRINCIPAL_KEY,
+    HIVE_METASTORE_URIS_KEY, SMOKEUSER_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return (('UNKNOWN', ['There were no configurations supplied to the script.']))
+
+  if not HIVE_METASTORE_URIS_KEY in configurations:
+    return (('UNKNOWN', ['Hive metastore uris were not supplied to the script.']))
+
+  metastore_uris = configurations[HIVE_METASTORE_URIS_KEY].split(',')
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  # defaults
+  smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
+  smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
+  smokeuser = SMOKEUSER_DEFAULT
+
+  # check script params
+  if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
+    smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
+
+  if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
+    smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]
+
+  if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
+    smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
+
+
+  # check configurations last as they should always take precedence
+  if SMOKEUSER_PRINCIPAL_KEY in configurations:
+    smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
+
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  result_code = None
+
+  try:
+    if security_enabled:
+      if SMOKEUSER_KEYTAB_KEY in configurations:
+        smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
+
+      # Get the configured Kerberos executable search paths, if any
+      if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+      else:
+        kerberos_executable_search_paths = None
+
+      kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+      kinitcmd=format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}; ")
+
+      Execute(kinitcmd, user=smokeuser,
+        path=["/bin/", "/usr/bin/", "/usr/lib/hive/bin/", "/usr/sbin/"],
+        timeout=10)
+
+    if host_name is None:
+      host_name = socket.getfqdn()
+
+    # default to the first configured uri so metastore_uri is always bound,
+    # then prefer the uri that points at this host
+    metastore_uri = metastore_uris[0]
+    for uri in metastore_uris:
+      if host_name in uri:
+        metastore_uri = uri
+
+    conf_dir = HIVE_CONF_DIR_LEGACY
+    bin_dir = HIVE_BIN_DIR_LEGACY
+
+    if os.path.exists(HIVE_CONF_DIR):
+      conf_dir = HIVE_CONF_DIR
+      bin_dir = HIVE_BIN_DIR
+
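+    # One fast-failing round trip to the metastore: retry counts and client
+    # timeouts are kept low so the alert reports promptly when it is down.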
+    cmd = format("export HIVE_CONF_DIR='{conf_dir}' ; "
+                 "hive --hiveconf hive.metastore.uris={metastore_uri}\
+                 --hiveconf hive.metastore.client.connect.retry.delay=1s\
+                 --hiveconf hive.metastore.failure.retries=1\
+                 --hiveconf hive.metastore.connect.retries=1\
+                 --hiveconf hive.metastore.client.socket.timeout=20s\
+                 --hiveconf hive.execution.engine=mr -e 'show databases;'")
+
+    start_time = time.time()
+
+    try:
+      Execute(cmd, user=smokeuser,
+        path=["/bin/", "/usr/bin/", "/usr/sbin/", bin_dir],
+        timeout=60 )
+
+      total_time = time.time() - start_time
+
+      result_code = 'OK'
+      label = OK_MESSAGE.format(total_time)
+    except Exception, exception:
+      result_code = 'CRITICAL'
+      label = CRITICAL_MESSAGE.format(host_name, str(exception))
+
+  except Exception, e:
+    label = str(e)
+    result_code = 'UNKNOWN'
+
+  return ((result_code, [label]))
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_hive_thrift_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_hive_thrift_port.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_hive_thrift_port.py
new file mode 100755
index 0000000..28d43c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_hive_thrift_port.py
@@ -0,0 +1,265 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+import socket
+import time
+from resource_management.libraries.functions import hive_check
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from ambari_commons.os_check import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
+CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"
+
+HIVE_SERVER_THRIFT_PORT_KEY = '{{hive-site/hive.server2.thrift.port}}'
+HIVE_SERVER_THRIFT_HTTP_PORT_KEY = '{{hive-site/hive.server2.thrift.http.port}}'
+HIVE_SERVER_TRANSPORT_MODE_KEY = '{{hive-site/hive.server2.transport.mode}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+HIVE_SERVER2_AUTHENTICATION_KEY = '{{hive-site/hive.server2.authentication}}'
+HIVE_SERVER_PRINCIPAL_KEY = '{{hive-site/hive.server2.authentication.kerberos.principal}}'
+SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
+SMOKEUSER_PRINCIPAL_KEY = '{{cluster-env/smokeuser_principal_name}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+HIVE_SSL = '{{hive-site/hive.server2.use.SSL}}'
+HIVE_SSL_KEYSTORE_PATH = '{{hive-site/hive.server2.keystore.path}}'
+HIVE_SSL_KEYSTORE_PASSWORD = '{{hive-site/hive.server2.keystore.password}}'
+
+PERCENT_WARNING = 200
+PERCENT_CRITICAL = 200
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+THRIFT_PORT_DEFAULT = 10000
+HIVE_SERVER_TRANSPORT_MODE_DEFAULT = 'binary'
+HIVE_SERVER_PRINCIPAL_DEFAULT = 'hive/_HOST@EXAMPLE.COM'
+HIVE_SERVER2_AUTHENTICATION_DEFAULT = 'NOSASL'
+# default keytab location
+SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY = 'default.smoke.keytab'
+SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
+# default smoke principal
+SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY = 'default.smoke.principal'
+SMOKEUSER_PRINCIPAL_DEFAULT = 'ambari-qa@EXAMPLE.COM'
+# default smoke user
+SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
+SMOKEUSER_DEFAULT = 'ambari-qa'
+
+HADOOPUSER_KEY = '{{cluster-env/hadoop.user.name}}'
+HADOOPUSER_DEFAULT = 'hadoop'
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HIVE_SERVER_THRIFT_PORT_KEY, SECURITY_ENABLED_KEY, SMOKEUSER_KEY,
+          HIVE_SERVER2_AUTHENTICATION_KEY, HIVE_SERVER_PRINCIPAL_KEY,
+          SMOKEUSER_KEYTAB_KEY, SMOKEUSER_PRINCIPAL_KEY, HIVE_SERVER_THRIFT_HTTP_PORT_KEY,
+          HIVE_SERVER_TRANSPORT_MODE_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, HIVE_SSL,
+          HIVE_SSL_KEYSTORE_PATH, HIVE_SSL_KEYSTORE_PASSWORD)
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HIVE_SERVER_THRIFT_PORT_KEY, HIVE_SERVER_THRIFT_HTTP_PORT_KEY,
+          HIVE_SERVER_TRANSPORT_MODE_KEY, HADOOPUSER_KEY)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
+  transport_mode = HIVE_SERVER_TRANSPORT_MODE_DEFAULT
+  if HIVE_SERVER_TRANSPORT_MODE_KEY in configurations:
+    transport_mode = configurations[HIVE_SERVER_TRANSPORT_MODE_KEY]
+
+  port = THRIFT_PORT_DEFAULT
+  if transport_mode.lower() == 'binary' and HIVE_SERVER_THRIFT_PORT_KEY in configurations:
+    port = int(configurations[HIVE_SERVER_THRIFT_PORT_KEY])
+  elif transport_mode.lower() == 'http' and HIVE_SERVER_THRIFT_HTTP_PORT_KEY in configurations:
+    port = int(configurations[HIVE_SERVER_THRIFT_HTTP_PORT_KEY])
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  hive_server2_authentication = HIVE_SERVER2_AUTHENTICATION_DEFAULT
+  if HIVE_SERVER2_AUTHENTICATION_KEY in configurations:
+    hive_server2_authentication = configurations[HIVE_SERVER2_AUTHENTICATION_KEY]
+
+  if hive_server2_authentication != "KERBEROS" and hive_server2_authentication != "NONE":
+    return ('OK', [''])
+
+  hive_ssl = False
+  if HIVE_SSL in configurations:
+    hive_ssl = configurations[HIVE_SSL]
+
+  hive_ssl_keystore_path = None
+  if HIVE_SSL_KEYSTORE_PATH in configurations:
+    hive_ssl_keystore_path = configurations[HIVE_SSL_KEYSTORE_PATH]
+
+  hive_ssl_keystore_password = None
+  if HIVE_SSL_KEYSTORE_PASSWORD in configurations:
+    hive_ssl_keystore_password = configurations[HIVE_SSL_KEYSTORE_PASSWORD]
+
+  # defaults
+  smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
+  smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT
+  smokeuser = SMOKEUSER_DEFAULT
+
+  # check script params
+  if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
+    smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
+
+  if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
+    smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]
+
+  if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
+    smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]
+
+
+  # check configurations last as they should always take precedence
+  if SMOKEUSER_PRINCIPAL_KEY in configurations:
+    smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
+
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  result_code = None
+
+  if security_enabled:
+    hive_server_principal = HIVE_SERVER_PRINCIPAL_DEFAULT
+    if HIVE_SERVER_PRINCIPAL_KEY in configurations:
+      hive_server_principal = configurations[HIVE_SERVER_PRINCIPAL_KEY]
+
+    if SMOKEUSER_KEYTAB_KEY in configurations:
+      smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]
+
+    # Get the configured Kerberos executable search paths, if any
+    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+    else:
+      kerberos_executable_search_paths = None
+
+    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+    kinitcmd=format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser_principal}; ")
+  else:
+    hive_server_principal = None
+    kinitcmd=None
+
+  try:
+    if host_name is None:
+      host_name = socket.getfqdn()
+
+    start_time = time.time()
+
+    try:
+      hive_check.check_thrift_port_sasl(host_name, port, hive_server2_authentication, hive_server_principal,
+                                        kinitcmd, smokeuser, transport_mode=transport_mode, ssl=hive_ssl,
+                                        ssl_keystore=hive_ssl_keystore_path, ssl_password=hive_ssl_keystore_password)
+      result_code = 'OK'
+      total_time = time.time() - start_time
+      label = OK_MESSAGE.format(total_time, port)
+    except Exception, exception:
+      result_code = 'CRITICAL'
+      label = CRITICAL_MESSAGE.format(host_name, port, str(exception))
+
+  except Exception, e:
+    label = str(e)
+    result_code = 'UNKNOWN'
+
+  return (result_code, [label])
+
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  from resource_management.libraries.functions import reload_windows_env
+  from resource_management.core.resources import Execute
+  reload_windows_env()
+  hive_home = os.environ['HIVE_HOME']
+
+  if configurations is None:
+    return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
+  transport_mode = HIVE_SERVER_TRANSPORT_MODE_DEFAULT
+  if HIVE_SERVER_TRANSPORT_MODE_KEY in configurations:
+    transport_mode = configurations[HIVE_SERVER_TRANSPORT_MODE_KEY]
+
+  port = THRIFT_PORT_DEFAULT
+  if transport_mode.lower() == 'binary' and HIVE_SERVER_THRIFT_PORT_KEY in configurations:
+    port = int(configurations[HIVE_SERVER_THRIFT_PORT_KEY])
+  elif transport_mode.lower() == 'http' and HIVE_SERVER_THRIFT_HTTP_PORT_KEY in configurations:
+    port = int(configurations[HIVE_SERVER_THRIFT_HTTP_PORT_KEY])
+
+  hiveuser = HADOOPUSER_DEFAULT
+  if HADOOPUSER_KEY in configurations:
+    hiveuser = configurations[HADOOPUSER_KEY]
+
+  result_code = None
+  try:
+    if host_name is None:
+      host_name = socket.getfqdn()
+
+    beeline_url = ['jdbc:hive2://{host_name}:{port}/', "transportMode={transport_mode}"]
+    # append url according to used transport
+    if transport_mode == "http":
+      beeline_url.append('httpPath=cliservice')
+    beeline_url_string = format(";".join(beeline_url))
+    beeline_cmd = os.path.join(hive_home, "bin", "beeline.cmd")
+    cmd = format("cmd /c {beeline_cmd} -u {beeline_url_string} -e '' 2>&1 | findstr Connected")
+
+    start_time = time.time()
+    try:
+      Execute(cmd, user=hiveuser, timeout=30)
+      total_time = time.time() - start_time
+      result_code = 'OK'
+      label = OK_MESSAGE.format(total_time, port)
+    except Exception, exception:
+      result_code = 'CRITICAL'
+      label = CRITICAL_MESSAGE.format(host_name, port, str(exception))
+  except Exception, e:
+    label = str(e)
+    result_code = 'UNKNOWN'
+
+  return (result_code, [label])
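
A quick illustration of how this alert is driven: the agent resolves each {{site/property}} token returned by get_tokens() against live cluster configuration and hands the resulting dictionary to execute(). The sketch below is hypothetical; the token keys mirror the constants defined earlier in this file, and the values are made-up examples, not output from a real cluster.

    # Hypothetical invocation of the default-OS execute() above, assuming
    # this module's constants and imports are in scope. Keys and values
    # are illustrative only.
    configurations = {
        '{{hive-site/hive.server2.thrift.port}}': '10000',
        '{{hive-site/hive.server2.transport.mode}}': 'binary',
        '{{cluster-env/security_enabled}}': 'false',
    }

    result_code, labels = execute(configurations=configurations,
                                  parameters={}, host_name='c6401.example.com')
    print result_code, labels[0]  # 'OK' with a timing label, or 'CRITICAL'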


[14/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-site.xml
new file mode 100755
index 0000000..397f96f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,748 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <!-- ResourceManager -->
+
+  <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>localhost</value>
+    <description>The hostname of the RM.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.address</name>
+    <value>localhost:8025</value>
+    <description>The address of the ResourceManager's resource tracker interface.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value>localhost:8030</value>
+    <description>The address of the scheduler interface.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>localhost:8050</value>
+    <description>
+      The address of the applications manager interface in the
+      RM.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.admin.address</name>
+    <value>localhost:8141</value>
+    <description>The address of the RM admin interface.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+    <description>The class to use as the resource scheduler.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value>512</value>
+    <description>
+      The minimum allocation for every container request at the RM,
+      in MBs. Memory requests lower than this won't take effect,
+      and the specified value will get allocated at minimum.
+    </description>
+    <display-name>Minimum Container Size (Memory)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>250</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.memory-mb</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.maximum-allocation-mb</name>
+    <value>2048</value>
+    <description>
+      The maximum allocation for every container request at the RM,
+      in MBs. Memory requests higher than this won't take effect,
+      and will get capped to this value.
+    </description>
+    <display-name>Maximum Container Size (Memory)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>5120</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.memory-mb</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>yarn.acl.enable</name>
+    <value>false</value>
+    <description>Whether ACLs are enabled.</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>yarn.admin.acl</name>
+    <value></value>
+    <description> ACL of who can be admin of the YARN cluster. </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+
+  <!-- NodeManager -->
+
+  <property>
+    <name>yarn.nodemanager.address</name>
+    <value>0.0.0.0:45454</value>
+    <description>The address of the container manager in the NM.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value>5120</value>
+    <description>Amount of physical memory, in MB, that can be allocated
+      for containers.</description>
+    <display-name>Memory allocated for all YARN containers on a node</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>268435456</maximum>
+      <unit>MB</unit>
+      <increment-step>250</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>/etc/hadoop/conf,/usr/iop/current/hadoop-client/*,/usr/iop/current/hadoop-client/lib/*,/usr/iop/current/hadoop-hdfs-client/*,/usr/iop/current/hadoop-hdfs-client/lib/*,/usr/iop/current/hadoop-yarn-client/*,/usr/iop/current/hadoop-yarn-client/lib/*</value>
+    <description>Classpath for typical applications.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.vmem-pmem-ratio</name>
+    <value>5</value>
+    <description>Ratio between virtual memory to physical memory when
+      setting memory limits for containers. Container allocations are
+      expressed in terms of physical memory, and virtual memory usage
+      is allowed to exceed this allocation by this ratio.
+    </description>
+    <display-name>Virtual Memory Ratio</display-name>
+    <value-attributes>
+      <type>float</type>
+      <minimum>0.1</minimum>
+      <maximum>5.0</maximum>
+      <increment-step>0.1</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value>hadoop</value>
+    <description>Unix group of the NodeManager</description>
+    <depends-on>
+      <property>
+        <type>yarn-env</type>
+        <name>yarn_cgroups_enabled</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle</value>
+    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot
+      start with numbers</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+    <description>The auxiliary service class to use.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-dirs</name>
+    <value>/hadoop/yarn/log</value>
+    <description>
+      Where to store container logs. An application's localized log directory
+      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
+      Individual containers' log directories will be below this, in directories
+      named container_{$contid}. Each container directory will contain the files
+      stderr, stdin, and syslog generated by that container.
+    </description>
+    <value-attributes>
+      <type>directories</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.local-dirs</name>
+    <value>/hadoop/yarn/local</value>
+    <description>
+      List of directories to store localized files in. An
+      application's localized file directory will be found in:
+      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
+      Individual containers' work directories, called container_${contid}, will
+      be subdirectories of this.
+    </description>
+    <value-attributes>
+      <type>directories</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.container-monitor.interval-ms</name>
+    <value>3000</value>
+    <description>
+      The interval, in milliseconds, for which the node manager
+      waits between two cycles of monitoring its containers' memory usage.
+    </description>
+  </property>
+
+  <!--
+  <property>
+    <name>yarn.nodemanager.health-checker.script.path</name>
+    <value>/etc/hadoop/conf/health_check_nodemanager</value>
+    <description>The health check script to run.</description>
+  </property>
+   -->
+
+  <property>
+    <name>yarn.nodemanager.health-checker.interval-ms</name>
+    <value>135000</value>
+    <description>Frequency of running node health script.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
+    <value>60000</value>
+    <description>Script time out period.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log.retain-second</name>
+    <value>604800</value>
+    <description>
+      Time in seconds to retain user logs. Only applicable if
+      log aggregation is disabled.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+    <description>Whether to enable log aggregation. </description>
+    <display-name>Enable Log Aggregation</display-name>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/app-logs</value>
+    <description>Location to aggregate logs to. </description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+    <value>logs</value>
+    <description>
+      The remote log dir will be created at
+      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-aggregation.compression-type</name>
+    <value>gz</value>
+    <description>
+      T-file compression types used to compress aggregated logs.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.delete.debug-delay-sec</name>
+    <value>0</value>
+    <description>
+      Number of seconds after an application finishes before the nodemanager's
+      DeletionService will delete the application's localized file directory
+      and log directory.
+
+      To diagnose Yarn application problems, set this property's value large
+      enough (for example, to 600 = 10 minutes) to permit examination of these
+      directories. After changing the property's value, you must restart the
+      nodemanager in order for it to have an effect.
+
+      The roots of Yarn applications' work directories are configurable with
+      the yarn.nodemanager.local-dirs property (see below), and the roots
+      of the Yarn applications' log directories are configurable with the
+      yarn.nodemanager.log-dirs property (see also below).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.log-aggregation.retain-seconds</name>
+    <value>2592000</value>
+    <description>
+      How long to keep aggregation logs before deleting them. -1 disables deletion.
+      Be careful: if you set this too small, you will spam the name node.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.admin-env</name>
+    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
+    <description>
+      Environment variables that should be forwarded from the NodeManager's
+      environment to the container's.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
+    <value>0.25</value>
+    <description>
+      The minimum fraction of disks that must be healthy for the nodemanager
+      to launch new containers. This corresponds to both
+      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs, i.e.
+      if fewer healthy local-dirs (or log-dirs) are available than this requires,
+      then new containers will not be launched on this node.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It's a global
+      setting for all application masters. Each application master can specify
+      its individual maximum number of application attempts via the API, but the
+      individual number cannot be more than the global upper bound. If it is,
+      the resourcemanager will override it. The default number is set to 2, to
+      allow at least one retry for AM.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.webapp.address</name>
+    <value>localhost:8088</value>
+    <description>
+      The address of the RM web application.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.vmem-check-enabled</name>
+    <value>true</value>
+    <description>
+      Whether virtual memory limits will be enforced for containers.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.log.server.url</name>
+    <value>http://localhost:19888/jobhistory/logs</value>
+    <description>
+      URI for the HistoryServer's log resource
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.nodes.exclude-path</name>
+    <value>/etc/hadoop/conf/yarn.exclude</value>
+    <description>
+      Names a file that contains a list of hosts that are
+      not permitted to connect to the resource manager.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.enabled</name>
+    <value>true</value>
+    <description>Indicate to clients whether timeline service is enabled or not.
+      If enabled, clients will put entities and events to the timeline server.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.store-class</name>
+    <!--<value>org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.LeveldbTimelineStore</value> -->
+    <value>org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore</value>
+    <description>
+      Store class name for timeline store
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.generic-application-history.store-class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
+    <description>
+      Store class name for history store, defaulting to file system store
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
+    <value>/var/log/hadoop-yarn/timeline</value>
+    <description>
+      Store file name for leveldb timeline store
+    </description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.webapp.address</name>
+    <value>localhost:8188</value>
+    <description>
+      The http address of the timeline service web application.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.webapp.https.address</name>
+    <value>localhost:8190</value>
+    <description>
+      The https address of the timeline service web application.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.address</name>
+    <value>localhost:10200</value>
+    <description>
+      This is default address for the timeline server to start
+      the RPC server.
+    </description>
+  </property>
+  <property>
+    <name>yarn.timeline-service.ttl-enable</name>
+    <description>Enable age off of timeline store data.</description>
+    <value>true</value>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+  <property>
+    <description>Time to live for timeline store data in milliseconds.</description>
+    <name>yarn.timeline-service.ttl-ms</name>
+    <value>2678400000</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description>
+    <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
+    <value>300000</value>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+
+  <!-- Default Values Set for IOP Stack -->
+
+  <!-- Fault Tolerance-->
+  <property>
+    <name>yarn.nodemanager.recovery.enabled</name>
+    <value>false</value>
+    <description>Enable the node manager to recover after starting</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.recovery.enabled</name>
+    <value>false</value>
+    <description>
+      Enable RM to recover state after starting.
+      If true, then yarn.resourcemanager.store.class must be specified.
+    </description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
+    <value>false</value>
+    <description>
+      Enable RM work preserving recovery. This configuration is private to YARN for experimenting with the feature.
+    </description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-address</name>
+    <value>localhost:2181</value>
+    <description>
+      List of Host:Port pairs of the ZooKeeper servers to be used by the RM: comma-separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". If the optional chroot suffix is used, the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - i.e. getting/setting/etc. "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
+    </description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
+    <value>30000</value>
+    <description>How often to try connecting to the ResourceManager.</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.connect.max-wait.ms</name>
+    <value>900000</value>
+    <description>Maximum time to wait to establish connection to ResourceManager</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.ha.enabled</name>
+    <value>false</value>
+    <description>Enable RM HA or not.</description>
+  </property>
+
+  <!-- Isolation -->
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
+    <description>Pre-requisite to use CGroups</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
+    <value>hadoop-yarn</value>
+    <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
+    <value>false</value>
+    <description>If true, YARN will automount the CGroup; however, the directory needs to already exist. Otherwise, the cgroup should be mounted by the admin.</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
+    <value>false</value>
+    <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
+  </property>
+
+  <!-- Scheduler -->
+  <property>
+    <name>yarn.nodemanager.resource.cpu-vcores</name>
+    <value>8</value>
+    <description></description>
+    <display-name>Number of virtual cores</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>32</maximum>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
+    <value>80</value>
+    <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
+    <display-name>Percentage of physical CPU allocated for all containers on a node</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>100</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+  </property>
+
+ <property>
+    <name>yarn.timeline-service.http-authentication.type</name>
+    <value>simple</value>
+    <description>
+      Defines authentication used for the Timeline Server HTTP endpoint.
+      Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
+    </description>
+  </property>
+  <property>
+    <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description></description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
+    <value>false</value>
+    <description>
+      Flag to enable override of the default kerberos authentication filter with
+      the RM authentication filter to allow authentication using delegation
+      tokens(fallback to kerberos if the tokens are missing).
+      Only applicable when the http authentication type is kerberos.
+    </description>
+  </property>
+
+   <property>
+     <name>yarn.timeline-service.bind-host</name>
+     <value>0.0.0.0</value>
+   </property>
+   <property>
+     <name>yarn.nodemanager.bind-host</name>
+     <value>0.0.0.0</value>
+   </property>
+   <property>
+     <name>yarn.resourcemanager.bind-host</name>
+     <value>0.0.0.0</value>
+   </property>
+   <property>
+     <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
+     <value>true</value>
+   </property>
+  <property>
+     <name>hadoop.registry.rm.enabled</name>
+     <value>false</value>
+     <description> Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete?
+     </description>
+  </property>
+  <property>
+     <name>hadoop.registry.zk.quorum</name>
+     <value>localhost:2181</value>
+     <description> List of hostname:port pairs defining the zookeeper quorum binding for the registry </description>
+  </property>
+
+  <property>
+    <name>yarn.node-labels.enabled</name>
+    <value>false</value>
+    <description>
+      Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label.
+    </description>
+    <display-name>Node Labels</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.monitor.enable</name>
+    <value>false</value>
+    <display-name>Pre-emption</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Enabled</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Disabled</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.minimum-allocation-vcores</name>
+    <value>1</value>
+    <description></description>
+    <display-name>Minimum Container Size (VCores)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.maximum-allocation-vcores</name>
+    <value>8</value>
+    <description></description>
+    <display-name>Maximum Container Size (VCores)</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>yarn.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      This configures the HTTP endpoint for YARN daemons. The following values are supported: HTTP_ONLY (service is provided only on http) and HTTPS_ONLY (service is provided only on https).
+    </description>
+  </property>
+
+</configuration>
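
One behavior worth calling out for the minimum/maximum allocation properties above: the scheduler normalizes every container memory request up to the next multiple of yarn.scheduler.minimum-allocation-mb and caps it at yarn.scheduler.maximum-allocation-mb. The sketch below illustrates that rounding with this file's defaults (512 and 2048 MB); it is an illustration of the behavior, not the scheduler's actual code.

    # Illustrative normalization of a container memory request against
    # the bounds configured in this yarn-site.xml.
    def normalize_request(requested_mb, minimum_mb=512, maximum_mb=2048):
        multiple = max(1, -(-requested_mb // minimum_mb))  # ceiling division
        return min(multiple * minimum_mb, maximum_mb)

    assert normalize_request(1) == 512      # raised to the minimum
    assert normalize_request(600) == 1024   # rounded up to a 512 MB multiple
    assert normalize_request(4096) == 2048  # capped at the maximum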

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/kerberos.json
new file mode 100755
index 0000000..e3a2d58
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/kerberos.json
@@ -0,0 +1,208 @@
+{
+  "services": [
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/hdfs"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "false",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyusers.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyusers.*.groups": "",
+            "yarn.resourcemanager.proxyusers.*.hosts": "",
+            "yarn.resourcemanager.proxyusers.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.nodemanager.linux-container-executor.cgroups.mount-path": ""
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        },
+        {
+          "name": "RESOURCEMANAGER",
+          "identities": [
+            {
+              "name": "resource_manager_rm",
+              "principal": {
+                "value": "rm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.resourcemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/rm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.resourcemanager.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "APP_TIMELINE_SERVER",
+          "identities": [
+            {
+              "name": "app_timeline_server_yarn",
+              "principal": {
+                "value": "yarn/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.timeline-service.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/yarn.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.timeline-service.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab"
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "MAPREDUCE2",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/hdfs"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "HISTORYSERVER",
+          "identities": [
+            {
+              "name": "history_server_jhs",
+              "principal": {
+                "value": "jhs/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "mapred-site/mapreduce.jobhistory.principal",
+                "local_username": "${mapred-env/mapred_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jhs.service.keytab",
+                "owner": {
+                  "name": "${mapred-env/mapred_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "mapred-site/mapreduce.jobhistory.keytab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file
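
This descriptor relies on ${...} references (for example ${keytab_dir}/nm.service.keytab and ${yarn-env/yarn_user}) that Ambari resolves against cluster configuration when Kerberos is enabled. The toy resolver below shows the substitution idea only; it is an assumption for illustration, not Ambari's actual implementation, and the sample values are invented. Note that _HOST is left alone here because Hadoop itself substitutes it at service start.

    import re

    # Toy resolver for ${...} references in a Kerberos descriptor value.
    # Bare names (${realm}, ${keytab_dir}) come from global settings;
    # ${config-type/property} names come from configuration. All values
    # below are invented examples.
    SETTINGS = {'realm': 'EXAMPLE.COM', 'keytab_dir': '/etc/security/keytabs'}
    CONFIGS = {'yarn-env/yarn_user': 'yarn', 'cluster-env/user_group': 'hadoop'}

    def resolve(value):
        def lookup(match):
            name = match.group(1)
            return CONFIGS.get(name, SETTINGS.get(name, match.group(0)))
        return re.sub(r'\$\{([^}]+)\}', lookup, value)

    print resolve('nm/_HOST@${realm}')                # nm/_HOST@EXAMPLE.COM
    print resolve('${keytab_dir}/nm.service.keytab')  # /etc/security/keytabs/nm.service.keytab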

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/metainfo.xml
new file mode 100755
index 0000000..b28ee8a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/metainfo.xml
@@ -0,0 +1,264 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <displayName>YARN</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.7.1</version>
+      <components>
+
+        <component>
+          <name>APP_TIMELINE_SERVER</name>
+          <displayName>App Timeline Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/application_timeline_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>RESOURCEMANAGER</name>
+          <displayName>ResourceManager</displayName>
+          <category>MASTER</category>
+          <cardinality>1-2</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/resourcemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>REFRESHQUEUES</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+          <configuration-dependencies>
+            <config-type>capacity-scheduler</config-type>
+          </configuration-dependencies>
+        </component>
+
+        <component>
+          <name>NODEMANAGER</name>
+          <displayName>NodeManager</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/nodemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>YARN_CLIENT</name>
+          <displayName>YARN Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/yarn_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>yarn-site.xml</fileName>
+              <dictionaryName>yarn-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>yarn-env.sh</fileName>
+              <dictionaryName>yarn-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>capacity-scheduler.xml</fileName>
+              <dictionaryName>capacity-scheduler</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-yarn</name>
+            </package>
+            <package>
+              <name>hadoop-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>yarn-env</config-type>
+        <config-type>core-site</config-type>
+        <config-type>yarn-log4j</config-type>
+      </configuration-dependencies>
+
+      <widgetsFileName>YARN_widgets.json</widgetsFileName>
+      <metricsFileName>YARN_metrics.json</metricsFileName>
+
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <displayName>MapReduce2</displayName>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.7.1.4.1</version>
+      <components>
+        <component>
+          <name>HISTORYSERVER</name>
+          <displayName>History Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>YARN/RESOURCEMANAGER</co-locate>
+          </auto-deploy>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/historyserver.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>MAPREDUCE2_CLIENT</name>
+          <displayName>MapReduce2 Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/mapreduce2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>mapred-site.xml</fileName>
+              <dictionaryName>mapred-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>mapred-env.sh</fileName>
+              <dictionaryName>mapred-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/mapred_service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-env</config-type>
+      </configuration-dependencies>
+
+      <configuration-dir>configuration-mapred</configuration-dir>
+      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
+      <widgetsFileName>MAPREDUCE2_widgets.json</widgetsFileName>
+      <metricsFileName>MAPREDUCE2_metrics.json</metricsFileName>
+
+    </service>
+
+  </services>
+</metainfo>
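
When reviewing a stack skeleton like this, it can help to dump the declared components and their cardinalities rather than reading the XML by eye. A small sketch with the standard library (the file name is assumed to be a local copy of this metainfo.xml):

    import xml.etree.ElementTree as ET

    # List every component declared in a stack metainfo.xml with its
    # category and cardinality, as a quick structural sanity check.
    root = ET.parse('metainfo.xml').getroot()
    for service in root.find('services').findall('service'):
        print service.findtext('name')
        for component in service.find('components').findall('component'):
            print '  %s (%s, cardinality %s)' % (
                component.findtext('name'),
                component.findtext('category'),
                component.findtext('cardinality'))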

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/alerts/alert_nodemanager_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/alerts/alert_nodemanager_health.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/alerts/alert_nodemanager_health.py
new file mode 100755
index 0000000..c462081
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/alerts/alert_nodemanager_health.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import json
+import socket
+import urllib2
+from ambari_commons import OSCheck
+from ambari_commons.inet_utils import resolve_address
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+NODEMANAGER_HTTP_ADDRESS_KEY = '{{yarn-site/yarn.nodemanager.webapp.address}}'
+NODEMANAGER_HTTPS_ADDRESS_KEY = '{{yarn-site/yarn.nodemanager.webapp.https.address}}'
+YARN_HTTP_POLICY_KEY = '{{yarn-site/yarn.http.policy}}'
+
+OK_MESSAGE = 'NodeManager Healthy'
+CRITICAL_CONNECTION_MESSAGE = 'Connection failed to {0} ({1})'
+CRITICAL_HTTP_STATUS_MESSAGE = 'HTTP {0} returned from {1} ({2})'
+CRITICAL_NODEMANAGER_STATUS_MESSAGE = 'NodeManager returned an unexpected status of "{0}"'
+CRITICAL_NODEMANAGER_UNKNOWN_JSON_MESSAGE = 'Unable to determine NodeManager health from unexpected JSON response'
+
+NODEMANAGER_DEFAULT_PORT = 8042
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (NODEMANAGER_HTTP_ADDRESS_KEY,NODEMANAGER_HTTPS_ADDRESS_KEY,
+  YARN_HTTP_POLICY_KEY)
+
+
+def execute(parameters=None, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  parameters (dictionary): a mapping of parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+  result_code = RESULT_CODE_UNKNOWN
+
+  if parameters is None:
+    return (result_code, ['There were no parameters supplied to the script.'])
+
+  scheme = 'http'
+  http_uri = None
+  https_uri = None
+  http_policy = 'HTTP_ONLY'
+
+  if NODEMANAGER_HTTP_ADDRESS_KEY in parameters:
+    http_uri = parameters[NODEMANAGER_HTTP_ADDRESS_KEY]
+
+  if NODEMANAGER_HTTPS_ADDRESS_KEY in parameters:
+    https_uri = parameters[NODEMANAGER_HTTPS_ADDRESS_KEY]
+
+  if YARN_HTTP_POLICY_KEY in parameters:
+    http_policy = parameters[YARN_HTTP_POLICY_KEY]
+
+  # determine the right URI and whether to use SSL
+  uri = http_uri
+  if http_policy == 'HTTPS_ONLY':
+    scheme = 'https'
+
+    if https_uri is not None:
+      uri = https_uri
+
+  label = ''
+  url_response = None
+  node_healthy = 'false'
+  total_time = 0
+
+  # some yarn-site structures don't have the web ui address
+  if uri is None:
+    if host_name is None:
+      host_name = socket.getfqdn()
+
+    uri = '{0}:{1}'.format(host_name, NODEMANAGER_DEFAULT_PORT)
+
+  if OSCheck.is_windows_family():
+    uri_host, uri_port = uri.split(':')
+    # on Windows, 0.0.0.0 is an invalid address to connect to, but on Linux it resolves to 127.0.0.1
+    uri_host = resolve_address(uri_host)
+    uri = '{0}:{1}'.format(uri_host, uri_port)
+
+  query = "{0}://{1}/ws/v1/node/info".format(scheme,uri)
+
+  try:
+    # execute the query for the JSON that includes the NodeManager health status
+    url_response = urllib2.urlopen(query)
+  except urllib2.HTTPError, httpError:
+    label = CRITICAL_HTTP_STATUS_MESSAGE.format(str(httpError.code), query,
+      str(httpError))
+
+    return (RESULT_CODE_CRITICAL, [label])
+  except Exception, exception:
+    label = CRITICAL_CONNECTION_MESSAGE.format(query, str(exception))
+    return (RESULT_CODE_CRITICAL, [label])
+
+  # URL response received, parse it
+  try:
+    json_response = json.loads(url_response.read())
+    node_healthy = json_response['nodeInfo']['nodeHealthy']
+
+    # convert boolean to string
+    node_healthy = str(node_healthy)
+  except:
+    return (RESULT_CODE_CRITICAL, [query])
+  finally:
+    if url_response is not None:
+      try:
+        url_response.close()
+      except:
+        pass
+
+  # proper JSON received, compare against known value
+  if node_healthy.lower() == 'true':
+    result_code = RESULT_CODE_OK
+    label = OK_MESSAGE
+  else:
+    result_code = RESULT_CODE_CRITICAL
+    label = CRITICAL_NODEMANAGER_STATUS_MESSAGE.format(node_healthy)
+
+  return (result_code, [label])
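
The health check above keys off the nodeHealthy field of the NodeManager's /ws/v1/node/info response. Below is a trimmed-down example of that payload and the extraction the script performs; only nodeHealthy is consumed, and the surrounding field values are illustrative.

    import json

    # Condensed /ws/v1/node/info response; only nodeHealthy matters to
    # the alert, the other fields are example values.
    payload = '''{"nodeInfo": {"id": "c6401.example.com:45454",
                  "nodeHealthy": true, "healthReport": ""}}'''

    node_healthy = str(json.loads(payload)['nodeInfo']['nodeHealthy'])
    print 'OK' if node_healthy.lower() == 'true' else 'CRITICAL'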

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/alerts/alert_nodemanagers_summary.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/alerts/alert_nodemanagers_summary.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/alerts/alert_nodemanagers_summary.py
new file mode 100755
index 0000000..72fe644
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/alerts/alert_nodemanagers_summary.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import urllib2
+import json
+
+from ambari_commons.urllib_handlers import RefreshHeaderProcessor
+
+ERROR_LABEL = '{0} NodeManager{1} {2} unhealthy.'
+OK_LABEL = 'All NodeManagers are healthy'
+
+NODEMANAGER_HTTP_ADDRESS_KEY = '{{yarn-site/yarn.resourcemanager.webapp.address}}'
+NODEMANAGER_HTTPS_ADDRESS_KEY = '{{yarn-site/yarn.resourcemanager.webapp.https.address}}'
+YARN_HTTP_POLICY_KEY = '{{yarn-site/yarn.http.policy}}'
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return NODEMANAGER_HTTP_ADDRESS_KEY, NODEMANAGER_HTTPS_ADDRESS_KEY, \
+    YARN_HTTP_POLICY_KEY
+
+
+def execute(parameters=None, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  parameters (dictionary): a mapping of parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if parameters is None:
+    return (('UNKNOWN', ['There were no parameters supplied to the script.']))
+
+  scheme = 'http'
+  http_uri = None
+  https_uri = None
+  http_policy = 'HTTP_ONLY'
+
+  if NODEMANAGER_HTTP_ADDRESS_KEY in parameters:
+    http_uri = parameters[NODEMANAGER_HTTP_ADDRESS_KEY]
+
+  if NODEMANAGER_HTTPS_ADDRESS_KEY in parameters:
+    https_uri = parameters[NODEMANAGER_HTTPS_ADDRESS_KEY]
+
+  if YARN_HTTP_POLICY_KEY in parameters:
+    http_policy = parameters[YARN_HTTP_POLICY_KEY]
+
+  # determine the right URI and whether to use SSL
+  uri = http_uri
+  if http_policy == 'HTTPS_ONLY':
+    scheme = 'https'
+
+    if https_uri is not None:
+      uri = https_uri
+
+  live_nodemanagers_qry = "{0}://{1}/jmx?qry=Hadoop:service=ResourceManager,name=RMNMInfo".format(scheme, uri)
+
+  try:
+    live_nodemanagers = json.loads(get_value_from_jmx(live_nodemanagers_qry, "LiveNodeManagers"))
+
+    unhealthy_count = 0
+
+    for nodemanager in live_nodemanagers:
+      health_report = nodemanager['State']
+      if health_report == 'UNHEALTHY':
+        unhealthy_count += 1
+
+    if unhealthy_count == 0:
+      result_code = 'OK'
+      label = OK_LABEL
+    else:
+      result_code = 'CRITICAL'
+      if unhealthy_count == 1:
+        label = ERROR_LABEL.format(unhealthy_count, '', 'is')
+      else:
+        label = ERROR_LABEL.format(unhealthy_count, 's', 'are')
+
+  except Exception, e:
+    label = str(e)
+    result_code = 'UNKNOWN'
+
+  return ((result_code, [label]))
+
+
+def get_value_from_jmx(query, jmx_property):
+  response = None
+
+  try:
+    # use a custom header processor that will look for the non-standard
+    # "Refresh" header and attempt to follow the redirect
+    url_opener = urllib2.build_opener(RefreshHeaderProcessor())
+    response = url_opener.open(query)
+
+    data = response.read()
+    data_dict = json.loads(data)
+    return data_dict["beans"][0][jmx_property]
+  finally:
+    if response is not None:
+      try:
+        response.close()
+      except:
+        pass

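For reference, the alert above can be exercised by hand-building the parameters
dictionary that Ambari normally fills in from yarn-site. A minimal sketch in
Python 2, matching the script (the host/port value is illustrative, not from
the diff):

    from alert_nodemanagers_summary import execute

    parameters = {
        '{{yarn-site/yarn.resourcemanager.webapp.address}}': 'rm-host.example.com:8088',
        '{{yarn-site/yarn.http.policy}}': 'HTTP_ONLY',
    }
    # queries the RM's RMNMInfo JMX bean and counts UNHEALTHY NodeManagers
    result_code, labels = execute(parameters=parameters, host_name='rm-host.example.com')
    print result_code, labels[0]
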
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/files/validateYarnComponentStatus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/files/validateYarnComponentStatus.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/files/validateYarnComponentStatus.py
new file mode 100755
index 0000000..862b4c2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/files/validateYarnComponentStatus.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import subprocess
+import json
+
+RESOURCEMANAGER = 'rm'
+NODEMANAGER = 'nm'
+HISTORYSERVER = 'hs'
+
+STARTED_STATE = 'STARTED'
+RUNNING_STATE = 'RUNNING'
+
+# Return the response for the given path and address
+def getResponse(path, address, ssl_enabled):
+
+  command = "curl"
+  httpGssnegotiate = "--negotiate"
+  userpswd = "-u:"
+  insecure = "-k"# This is smoke test, no need to check CA of server
+  if ssl_enabled:
+    url = 'https://' + address + path
+  else:
+    url = 'http://' + address + path
+
+  command_with_flags = [command,httpGssnegotiate,userpswd,insecure,url]
+
+  proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+  (stdout, stderr) = proc.communicate()
+  response = json.loads(stdout)
+  if response is None:
+    print 'There is no response for url: ' + str(url)
+    raise Exception('There is no response for url: ' + str(url))
+  return response
+
+#Verify that REST api is available for given component
+def validateAvailability(component, path, addresses, ssl_enabled):
+  responses = {}
+  for address in addresses.split(','):
+    try:
+      responses[address] = getResponse(path, address, ssl_enabled)
+    except Exception as e:
+      print 'Error checking availability status of component.', e
+
+  if not responses:
+    exit(1)
+
+  is_valid = validateAvailabilityResponse(component, responses.values()[0])
+  if not is_valid:
+    exit(1)
+
+#Validate component-specific response
+def validateAvailabilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      rm_state = response['clusterInfo']['state']
+      if rm_state == STARTED_STATE:
+        return True
+      else:
+        print 'ResourceManager is not started'
+        return False
+
+    elif component == NODEMANAGER:
+      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
+      if node_healthy:
+        return True
+      else:
+        return False
+    elif component == HISTORYSERVER:
+      hs_start_time = response['historyInfo']['startedOn']
+      if hs_start_time > 0:
+        return True
+      else:
+        return False
+    else:
+      return False
+  except Exception as e:
+    print 'Error validating availability response for ' + str(component), e
+    return False
+
+#Verify that component has required resources to work
+def validateAbility(component, path, addresses, ssl_enabled):
+  responses = {}
+  for address in addresses.split(','):
+    try:
+      responses[address] = getResponse(path, address, ssl_enabled)
+    except Exception as e:
+      print 'Error checking ability of component.', e
+
+  if not responses:
+    exit(1)
+
+  is_valid = validateAbilityResponse(component, responses.values()[0])
+  if not is_valid:
+    exit(1)
+
+#Validate component-specific response that it has required resources to work
+def validateAbilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      nodes = []
+      if 'nodes' in response and response['nodes'] is not None and 'node' in response['nodes']:
+        nodes = response['nodes']['node']
+      connected_nodes_count = len(nodes)
+      if connected_nodes_count == 0:
+        print 'There are no NodeManagers connected to the ResourceManager'
+        return False
+      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
+      active_nodes_count = len(active_nodes)
+
+      if active_nodes_count == 0:
+        print 'There are no active NodeManagers connected to the ResourceManager'
+        return False
+      else:
+        return True
+    else:
+      return False
+  except Exception as e:
+    print 'Error validating ability response', e
+    return False
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
+  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
+
+  (options, args) = parser.parse_args()
+
+  if not args:
+    parser.error("Component argument is required")
+  component = args[0]
+
+  address = options.address
+  ssl_enabled = str(options.ssl_enabled).lower() == 'true'
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/info'
+  elif component == NODEMANAGER:
+    path = '/ws/v1/node/info'
+  elif component == HISTORYSERVER:
+    path = '/ws/v1/history/info'
+  else:
+    parser.error("Invalid component")
+
+  validateAvailability(component, path, address, ssl_enabled)
+
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/nodes'
+    validateAbility(component, path, address, ssl_enabled)
+
+if __name__ == "__main__":
+  main()

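A quick way to read main() above: each component name selects one "info" REST
endpoint, and the ResourceManager is additionally probed for connected nodes
(e.g. python validateYarnComponentStatus.py rm -p rm-host.example.com:8088 -s false,
with an illustrative host). The mapping, pulled out as a standalone sketch:

    # component-to-endpoint mapping used by main() (paths copied from the diff)
    paths = {'rm': '/ws/v1/cluster/info',
             'nm': '/ws/v1/node/info',
             'hs': '/ws/v1/history/info'}
    print paths['rm']   # the RM is then also checked at /ws/v1/cluster/nodes
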
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/__init__.py
new file mode 100755
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/__init__.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/application_timeline_server.py
new file mode 100755
index 0000000..5152cf9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/application_timeline_server.py
@@ -0,0 +1,141 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from resource_management.libraries.functions.format import format
+# needed by status(), which calls functions.check_process_status
+from resource_management.libraries import functions
+
+from yarn import yarn
+from service import service
+
+class ApplicationTimelineServer(Script):
+
+  def get_component_name(self):
+    return "hadoop-yarn-timelineserver"
+
+  def install(self, env):
+    self.install_packages(env)
+    #self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name='apptimelineserver')
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-yarn-timelineserver", params.version)
+      #Execute(format("stack-select set hadoop-yarn-timelineserver {version}"))
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('timelineserver', action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service('timelineserver', action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    Execute(format("mv {yarn_historyserver_pid_file_old} {yarn_historyserver_pid_file}"),
+            only_if = format("test -e {yarn_historyserver_pid_file_old}", user=status_params.yarn_user))
+    functions.check_process_status(status_params.yarn_historyserver_pid_file)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      props_value_check = {"yarn.timeline-service.enabled": "true",
+                           "yarn.timeline-service.http-authentication.type": "kerberos",
+                           "yarn.acl.enable": "true"}
+      props_empty_check = ["yarn.timeline-service.principal",
+                           "yarn.timeline-service.keytab",
+                           "yarn.timeline-service.http-authentication.kerberos.principal",
+                           "yarn.timeline-service.http-authentication.kerberos.keytab"]
+
+      props_read_check = ["yarn.timeline-service.keytab",
+                          "yarn.timeline-service.http-authentication.kerberos.keytab"]
+      yarn_site_props = build_expectations('yarn-site', props_value_check, props_empty_check,
+                                                  props_read_check)
+
+      yarn_expectations ={}
+      yarn_expectations.update(yarn_site_props)
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'yarn-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, yarn_expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'yarn-site' not in security_params
+               or 'yarn.timeline-service.keytab' not in security_params['yarn-site']
+               or 'yarn.timeline-service.principal' not in security_params['yarn-site']) \
+            or 'yarn.timeline-service.http-authentication.kerberos.keytab' not in security_params['yarn-site'] \
+            or 'yarn.timeline-service.http-authentication.kerberos.principal' not in security_params['yarn-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.timeline-service.keytab'],
+                                security_params['yarn-site']['yarn.timeline-service.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.yarn_user,
+                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.keytab'],
+                                security_params['yarn-site']['yarn.timeline-service.http-authentication.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+if __name__ == "__main__":
+  ApplicationTimelineServer().execute()

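The security_status() pattern above recurs in the HistoryServer and DataNode
scripts later in this commit: verify that the required yarn-site properties are
present, then attempt a kinit with the configured keytab and principal. A
standalone sketch of the property check, deliberately not using the
resource_management API (the keytab path is illustrative):

    # report which required properties are missing or empty
    def missing_props(site_config, required):
        return [p for p in required if not site_config.get(p)]

    yarn_site = {'yarn.timeline-service.keytab': '/etc/security/keytabs/yarn.service.keytab'}
    required = ['yarn.timeline-service.keytab', 'yarn.timeline-service.principal']
    print missing_props(yarn_site, required)  # -> ['yarn.timeline-service.principal']
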
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/historyserver.py
new file mode 100755
index 0000000..bb384e4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/historyserver.py
@@ -0,0 +1,155 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.format import format
+from resource_management.core.source import Template
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+
+from yarn import yarn
+from service import service
+
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+class HistoryServer(Script):
+
+  def get_component_name(self):
+    return "hadoop-mapreduce-historyserver"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn(name="historyserver")
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-mapreduce-historyserver", params.version)
+      # MC Hammer said, "Can't touch this"
+      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
+      copy_to_hdfs("slider", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
+      params.HdfsResource(None, action="execute")
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+
+    # MC Hammer said, "Can't touch this"
+    resource_created = copy_to_hdfs(
+      "mapreduce",
+      params.user_group,
+      params.hdfs_user,
+      host_sys_prepped=params.host_sys_prepped)
+    resource_created = copy_to_hdfs(
+      "slider",
+      params.user_group,
+      params.hdfs_user,
+      host_sys_prepped=params.host_sys_prepped) or resource_created
+    if resource_created:
+      params.HdfsResource(None, action="execute")
+    service('historyserver', action='start', serviceName='mapreduce')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    service('historyserver', action='stop', serviceName='mapreduce')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.mapred_historyserver_pid_file)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+    if status_params.security_enabled:
+      expectations = {}
+      expectations.update(build_expectations('mapred-site',
+                                             None,
+                                             [
+                                               'mapreduce.jobhistory.keytab',
+                                               'mapreduce.jobhistory.principal',
+                                               'mapreduce.jobhistory.webapp.spnego-keytab-file',
+                                               'mapreduce.jobhistory.webapp.spnego-principal'
+                                             ],
+                                             None))
+
+      security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'mapred-site.xml': FILE_TYPE_XML})
+      result_issues = validate_security_config_properties(security_params, expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ( 'mapred-site' not in security_params or
+               'mapreduce.jobhistory.keytab' not in security_params['mapred-site'] or
+               'mapreduce.jobhistory.principal' not in security_params['mapred-site'] or
+               'mapreduce.jobhistory.webapp.spnego-keytab-file' not in security_params['mapred-site'] or
+               'mapreduce.jobhistory.webapp.spnego-principal' not in security_params['mapred-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal not set."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.mapred_user,
+                                security_params['mapred-site']['mapreduce.jobhistory.keytab'],
+                                security_params['mapred-site']['mapreduce.jobhistory.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.mapred_user,
+                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
+                                security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+if __name__ == "__main__":
+  HistoryServer().execute()

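One detail worth noting in start() above: the two copy_to_hdfs() results are
OR-ed together so the deferred HDFS operations are flushed once, and only if at
least one tarball actually needed copying. The same pattern in isolation (the
helper names are stand-ins, not the Ambari API):

    def copy_mapreduce_tarball(): return False   # stand-in: already in HDFS
    def copy_slider_tarball(): return True       # stand-in: a copy happened

    resource_created = copy_mapreduce_tarball()
    resource_created = copy_slider_tarball() or resource_created
    if resource_created:
        print "flush pending HDFS operations"    # i.e. params.HdfsResource(None, action="execute")
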
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/mapred_service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/mapred_service_check.py
new file mode 100755
index 0000000..db10343
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/mapred_service_check.py
@@ -0,0 +1,80 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class MapReduce2ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
+    input_file = format("/user/{smokeuser}/mapredsmokeinput")
+    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
+
+    cleanup_cmd = format("fs -rm -r -f {output_file} {input_file}")
+    create_file_cmd = format("fs -put /etc/passwd {input_file}")
+    test_cmd = format("fs -test -e {output_file}")
+    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
+
+    log_dir = format("{mapred_log_dir_prefix}/{smokeuser}")
+    Directory(log_dir, owner=params.smokeuser, create_parents=True)
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+
+      Execute(kinit_cmd,
+              user=params.smokeuser
+      )
+
+    ExecuteHadoop(cleanup_cmd,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  bin_dir=params.execute_path,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+    ExecuteHadoop(create_file_cmd,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  bin_dir=params.execute_path,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+    ExecuteHadoop(run_wordcount_job,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  bin_dir=params.execute_path,
+                  conf_dir=params.hadoop_conf_dir,
+                  logoutput=True
+    )
+
+    ExecuteHadoop(test_cmd,
+                  user=params.smokeuser,
+                  bin_dir=params.execute_path,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+if __name__ == "__main__":
+  MapReduce2ServiceCheck().execute()

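Expanded for a hypothetical smoke user, the four ExecuteHadoop steps above
correspond to plain hadoop CLI commands; a sketch that prints them (the jar
path is an assumed example, the real one comes from params):

    smokeuser = 'ambari-qa'   # illustrative
    jar_path = '/usr/iop/current/hadoop-mapreduce-client/hadoop-mapreduce-examples.jar'  # assumed
    input_file = '/user/%s/mapredsmokeinput' % smokeuser
    output_file = '/user/%s/mapredsmokeoutput' % smokeuser
    for cmd in ('fs -rm -r -f %s %s' % (output_file, input_file),
                'fs -put /etc/passwd %s' % input_file,
                'jar %s wordcount %s %s' % (jar_path, input_file, output_file),
                'fs -test -e %s' % output_file):
        print 'hadoop ' + cmd
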
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/mapreduce2_client.py
new file mode 100755
index 0000000..77fd49e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/YARN/package/scripts/mapreduce2_client.py
@@ -0,0 +1,58 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+# needed by pre_upgrade_restart(), which calls these without a module prefix
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+
+from yarn import yarn
+
+class MapReduce2Client(Script):
+
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+      #Execute(format("stack-select set hadoop-client {version}"))
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  MapReduce2Client().execute()


http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/alerts/alert_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/alerts/alert_checkpoint_time.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/alerts/alert_checkpoint_time.py
new file mode 100755
index 0000000..7fbb9a2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/alerts/alert_checkpoint_time.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import time
+import urllib2
+import json
+
+LABEL = 'Last Checkpoint: [{h} hours, {m} minutes, {tx} transactions]'
+
+NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
+NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
+NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+NN_CHECKPOINT_TX_KEY = '{{hdfs-site/dfs.namenode.checkpoint.txns}}'
+NN_CHECKPOINT_PERIOD_KEY = '{{hdfs-site/dfs.namenode.checkpoint.period}}'
+
+PERCENT_WARNING = 200
+PERCENT_CRITICAL = 200
+
+CHECKPOINT_TX_DEFAULT = 1000000
+CHECKPOINT_PERIOD_DEFAULT = 21600
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY,
+      NN_CHECKPOINT_TX_KEY, NN_CHECKPOINT_PERIOD_KEY)
+
+
+def execute(parameters=None, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  parameters (dictionary): a mapping of parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if parameters is None:
+    return (('UNKNOWN', ['There were no parameters supplied to the script.']))
+
+  uri = None
+  scheme = 'http'
+  http_uri = None
+  https_uri = None
+  http_policy = 'HTTP_ONLY'
+  percent_warning = PERCENT_WARNING
+  percent_critical = PERCENT_CRITICAL
+  checkpoint_tx = CHECKPOINT_TX_DEFAULT
+  checkpoint_period = CHECKPOINT_PERIOD_DEFAULT
+
+  if NN_HTTP_ADDRESS_KEY in parameters:
+    http_uri = parameters[NN_HTTP_ADDRESS_KEY]
+
+  if NN_HTTPS_ADDRESS_KEY in parameters:
+    https_uri = parameters[NN_HTTPS_ADDRESS_KEY]
+
+  if NN_HTTP_POLICY_KEY in parameters:
+    http_policy = parameters[NN_HTTP_POLICY_KEY]
+
+  if NN_CHECKPOINT_TX_KEY in parameters:
+    checkpoint_tx = parameters[NN_CHECKPOINT_TX_KEY]
+
+  if NN_CHECKPOINT_PERIOD_KEY in parameters:
+    checkpoint_period = parameters[NN_CHECKPOINT_PERIOD_KEY]
+
+  # determine the right URI and whether to use SSL
+  uri = http_uri
+  if http_policy == 'HTTPS_ONLY':
+    scheme = 'https'
+
+    if https_uri is not None:
+      uri = https_uri
+
+  current_time = int(round(time.time() * 1000))
+
+  last_checkpoint_time_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".format(scheme,uri)
+  journal_transaction_info_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(scheme,uri)
+
+  # start out assuming an OK status
+  label = None
+  result_code = "OK"
+
+  try:
+    last_checkpoint_time = int(get_value_from_jmx(last_checkpoint_time_qry,"LastCheckpointTime"))
+    journal_transaction_info = get_value_from_jmx(journal_transaction_info_qry,"JournalTransactionInfo")
+    journal_transaction_info_dict = json.loads(journal_transaction_info)
+
+    last_tx = int(journal_transaction_info_dict['LastAppliedOrWrittenTxId'])
+    most_recent_tx = int(journal_transaction_info_dict['MostRecentCheckpointTxId'])
+    transaction_difference = last_tx - most_recent_tx
+
+    delta = (current_time - last_checkpoint_time)/1000
+
+    label = LABEL.format(h=get_time(delta)['h'], m=get_time(delta)['m'], tx=transaction_difference)
+
+    if (transaction_difference > int(checkpoint_tx)) and (float(delta) / int(checkpoint_period)*100 >= int(percent_critical)):
+      result_code = 'CRITICAL'
+    elif (transaction_difference > int(checkpoint_tx)) and (float(delta) / int(checkpoint_period)*100 >= int(percent_warning)):
+      result_code = 'WARNING'
+
+  except Exception, e:
+    label = str(e)
+    result_code = 'UNKNOWN'
+
+  return ((result_code, [label]))
+
+def get_time(delta):
+  h = int(delta/3600)
+  m = int((delta % 3600)/60)
+  return {'h':h, 'm':m}
+
+
+def get_value_from_jmx(query, jmx_property):
+  response = None
+
+  try:
+    response = urllib2.urlopen(query)
+    data = response.read()
+
+    data_dict = json.loads(data)
+    return data_dict["beans"][0][jmx_property]
+  finally:
+    if response is not None:
+      try:
+        response.close()
+      except:
+        pass

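To make the threshold logic above concrete, here is a worked example with
illustrative numbers (the defaults from the script: checkpoint_tx = 1000000,
checkpoint_period = 21600 seconds, both percent thresholds at 200):

    checkpoint_tx, checkpoint_period = 1000000, 21600
    transaction_difference = 1200000               # uncheckpointed transactions
    delta = 50000                                  # seconds since last checkpoint
    pct = float(delta) / checkpoint_period * 100   # ~231% of the checkpoint period
    print transaction_difference > checkpoint_tx and pct >= 200   # True -> CRITICAL
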
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/alerts/alert_ha_namenode_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/alerts/alert_ha_namenode_health.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/alerts/alert_ha_namenode_health.py
new file mode 100755
index 0000000..d48f831
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/alerts/alert_ha_namenode_health.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import urllib2
+import json
+
+RESULT_STATE_OK = 'OK'
+RESULT_STATE_CRITICAL = 'CRITICAL'
+RESULT_STATE_UNKNOWN = 'UNKNOWN'
+RESULT_STATE_SKIPPED = 'SKIPPED'
+
+HDFS_NN_STATE_ACTIVE = 'active'
+HDFS_NN_STATE_STANDBY = 'standby'
+
+HDFS_SITE_KEY = '{{hdfs-site}}'
+NAMESERVICE_KEY = '{{hdfs-site/dfs.nameservices}}'
+NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
+NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
+DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY,
+          NN_HTTPS_ADDRESS_KEY, DFS_POLICY_KEY)
+
+
+def execute(parameters=None, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  parameters (dictionary): a mapping of parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+  if parameters is None:
+    return (RESULT_STATE_UNKNOWN, ['There were no parameters supplied to the script.'])
+
+  # if not in HA mode, then SKIP
+  if NAMESERVICE_KEY not in parameters:
+    return (RESULT_STATE_SKIPPED, ['NameNode HA is not enabled'])
+
+  # hdfs-site is required
+  if HDFS_SITE_KEY not in parameters:
+    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
+
+  # determine whether or not SSL is enabled
+  is_ssl_enabled = False
+  if DFS_POLICY_KEY in parameters:
+    dfs_policy = parameters[DFS_POLICY_KEY]
+    if dfs_policy == "HTTPS_ONLY":
+      is_ssl_enabled = True
+
+  name_service = parameters[NAMESERVICE_KEY]
+  hdfs_site = parameters[HDFS_SITE_KEY]
+
+  # look for dfs.ha.namenodes.foo
+  nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
+  if nn_unique_ids_key not in hdfs_site:
+    return (RESULT_STATE_UNKNOWN, ['Unable to find unique namenode alias key {0}'.format(nn_unique_ids_key)])
+
+  namenode_http_fragment = 'dfs.namenode.http-address.{0}.{1}'
+  jmx_uri_fragment = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"
+
+  if is_ssl_enabled:
+    namenode_http_fragment = 'dfs.namenode.https-address.{0}.{1}'
+    jmx_uri_fragment = "https://{0}/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"
+
+
+  active_namenodes = []
+  standby_namenodes = []
+  unknown_namenodes = []
+
+  # now we have something like 'nn1,nn2,nn3,nn4'
+  # turn it into dfs.namenode.[property].[dfs.nameservices].[nn_unique_id]
+  # i.e. dfs.namenode.http-address.hacluster.nn1
+  nn_unique_ids = hdfs_site[nn_unique_ids_key].split(',')
+  for nn_unique_id in nn_unique_ids:
+    key = namenode_http_fragment.format(name_service,nn_unique_id)
+
+    if key in hdfs_site:
+      # use str() to ensure that unicode strings do not have the u' in them
+      value = str(hdfs_site[key])
+
+      try:
+        jmx_uri = jmx_uri_fragment.format(value)
+        state = get_value_from_jmx(jmx_uri,'State')
+
+        if state == HDFS_NN_STATE_ACTIVE:
+          active_namenodes.append(value)
+        elif state == HDFS_NN_STATE_STANDBY:
+          standby_namenodes.append(value)
+        else:
+          unknown_namenodes.append(value)
+      except:
+        unknown_namenodes.append(value)
+
+  # now that the request is done, determine if this host is the host that
+  # should report the status of the HA topology
+  is_active_namenode = False
+  for active_namenode in active_namenodes:
+    if active_namenode.startswith(host_name):
+      is_active_namenode = True
+
+  # the topology is healthy only when there is exactly 1 active and 1 standby
+  is_topology_healthy = len(active_namenodes) == 1 and len(standby_namenodes) == 1
+
+  result_label = 'Active{0}, Standby{1}, Unknown{2}'.format(str(active_namenodes),
+    str(standby_namenodes), str(unknown_namenodes))
+
+  # Healthy Topology:
+  #   - Active NN reports the alert, standby does not
+  #
+  # Unhealthy Topology:
+  #   - Report the alert if this is the first named host
+  #   - Report the alert if not the first named host, but the other host
+  #   could not report its status
+  if is_topology_healthy:
+    if is_active_namenode is True:
+      return (RESULT_STATE_OK, [result_label])
+    else:
+      return (RESULT_STATE_SKIPPED, ['Another host will report this alert'])
+  else:
+    # dfs.namenode.rpc-address.service.alias is guaranteed in HA mode
+    first_listed_host_key = 'dfs.namenode.rpc-address.{0}.{1}'.format(
+      name_service, nn_unique_ids[0])
+
+    first_listed_host = ''
+    if first_listed_host_key in hdfs_site:
+      first_listed_host = hdfs_site[first_listed_host_key]
+
+    is_first_listed_host = False
+    if first_listed_host.startswith(host_name):
+      is_first_listed_host = True
+
+    if is_first_listed_host:
+      return (RESULT_STATE_CRITICAL, [result_label])
+    else:
+      # not the first listed host, but the first host might be in the unknown list
+      return (RESULT_STATE_SKIPPED, ['Another host will report this alert'])
+
+
+def get_value_from_jmx(query, jmx_property):
+  response = None
+
+  try:
+    response = urllib2.urlopen(query)
+    data = response.read()
+
+    data_dict = json.loads(data)
+    return data_dict["beans"][0][jmx_property]
+  finally:
+    if response is not None:
+      try:
+        response.close()
+      except:
+        pass

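The key derivation in the loop above is easiest to see with concrete values; a
standalone sketch (host names and ports are illustrative):

    name_service = 'hacluster'
    hdfs_site = {'dfs.ha.namenodes.hacluster': 'nn1,nn2',
                 'dfs.namenode.http-address.hacluster.nn1': 'nn1-host.example.com:50070',
                 'dfs.namenode.http-address.hacluster.nn2': 'nn2-host.example.com:50070'}
    for nn_id in hdfs_site['dfs.ha.namenodes.' + name_service].split(','):
        key = 'dfs.namenode.http-address.{0}.{1}'.format(name_service, nn_id)
        print key, '->', hdfs_site[key]   # each address is then probed via /jmx
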
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/files/checkForFormat.sh
new file mode 100755
index 0000000..5eff79d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/files/checkForFormat.sh
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export bin_dir=$1
+shift
+export old_mark_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ -d $old_mark_dir ]] ; then
+  mv ${old_mark_dir} ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su -s /bin/bash - ${hdfs_user} -c "export PATH=$PATH:${bin_dir} ; yes Y | hdfs --config ${conf_dir} ${command}"
+    (( EXIT_CODE = $EXIT_CODE | $? ))
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE

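The per-directory emptiness test above (ls $dir | wc -l | grep -q ^0$) has a
more direct equivalent that may be easier to reason about; a sketch in Python,
matching the other scripts in this commit (the path is illustrative):

    import os

    def is_empty_dir(path):
        # True only for an existing directory with no entries at all
        return os.path.isdir(path) and len(os.listdir(path)) == 0

    print is_empty_dir('/hadoop/hdfs/namenode')
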
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/files/checkWebUI.py
new file mode 100755
index 0000000..6c54188
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/files/checkWebUI.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import httplib
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
+  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
+  parser.add_option("-s", "--https", dest="https", help="\"True\" if value of dfs.http.policy is \"HTTPS_ONLY\"")
+
+  (options, args) = parser.parse_args()
+
+  hosts = options.hosts.split(',')
+  port = options.port
+  https = str(options.https).lower() == "true"
+
+  for host in hosts:
+    try:
+      conn = httplib.HTTPSConnection(host, port) if https else httplib.HTTPConnection(host, port)
+      # This can be modified to get a partial url part to be sent with request
+      conn.request("GET", "/")
+      httpCode = conn.getresponse().status
+      conn.close()
+    except Exception:
+      httpCode = 404
+
+    if httpCode != 200:
+      scheme = "https" if https else "http"
+      print "Cannot access WEB UI on: " + scheme + "://" + host + ":" + port
+
+      exit(1)
+
+if __name__ == "__main__":
+  main()

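The core reachability probe above, in isolation (Python 2 httplib; the host is
illustrative):

    import httplib

    conn = httplib.HTTPConnection('nn-host.example.com', 50070)
    conn.request("GET", "/")
    print conn.getresponse().status   # 200 means the web UI answered
    conn.close()
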
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
new file mode 100755
index 0000000..2939fb7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import time
+import sys
+from threading import Thread
+
+
+def write_function(path, handle, interval):
+  with open(path) as f:
+      for line in f:
+          handle.write(line)
+          handle.flush()
+          time.sleep(interval)
+
+thread = Thread(target=write_function, args=('balancer.log', sys.stdout, 1.5))
+thread.start()
+
+threaderr = Thread(target=write_function, args=('balancer-err.log', sys.stderr, 1.5 * 0.023))
+threaderr.start()
+
+thread.join()
+
+
+def rebalancer_out():
+  write_function('balancer.log', sys.stdout, 1.5)
+
+def rebalancer_err():
+  # mirror the thread setup above: errors go to stderr
+  write_function('balancer-err.log', sys.stderr, 1.5 * 0.023)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/datanode.py
new file mode 100755
index 0000000..5d1e18b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/datanode.py
@@ -0,0 +1,144 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import datanode_upgrade
+from hdfs_datanode import datanode
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management import *
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
+from hdfs import hdfs
+
+
+class DataNode(Script):
+
+  def install(self, env):
+    import params
+    self.install_packages(env)
+    env.set_params(params)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hdfs()
+    datanode(action="configure")
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    datanode(action="start")
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    # pre-upgrade steps shutdown the datanode, so there's no need to call
+    # action=stop
+    if upgrade_type == "rolling":
+      datanode_upgrade.pre_upgrade_shutdown()
+    else:
+      datanode(action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.datanode_pid_file)
+
+  def get_component_name(self):
+    return "hadoop-hdfs-datanode"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing DataNode Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-hdfs-datanode", params.version)
+      #Execute(format("stack-select set hadoop-hdfs-datanode {version}"))
+
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing DataNode Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+    # ensure the DataNode has started and rejoined the cluster
+    datanode_upgrade.post_upgrade_check()
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+    props_value_check = None
+    props_empty_check = ['dfs.datanode.keytab.file',
+                         'dfs.datanode.kerberos.principal']
+    props_read_check = ['dfs.datanode.keytab.file']
+    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
+                                                props_read_check)
+
+    hdfs_expectations = {}
+    hdfs_expectations.update(core_site_expectations)
+    hdfs_expectations.update(hdfs_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                 {'core-site.xml': FILE_TYPE_XML,
+                                                  'hdfs-site.xml': FILE_TYPE_XML})
+
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ('hdfs-site' not in security_params or
+                  'dfs.datanode.keytab.file' not in security_params['hdfs-site'] or
+                  'dfs.datanode.kerberos.principal' not in security_params['hdfs-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set property."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hdfs_user,
+                                security_params['hdfs-site']['dfs.datanode.keytab.file'],
+                                security_params['hdfs-site']['dfs.datanode.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+if __name__ == "__main__":
+  DataNode().execute()
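
The security_status() method above follows a pattern repeated throughout these scripts: declare the Kerberos-related properties that must be present, read the live *-site.xml files from disk, validate them, and only then attempt a cached kinit. A minimal standalone sketch of that flow, reusing the helper signatures imported above (the conf dir path is illustrative):

    from resource_management.libraries.functions.security_commons import (
        build_expectations, get_params_from_filesystem,
        validate_security_config_properties, FILE_TYPE_XML)

    def check_hdfs_security(conf_dir='/etc/hadoop/conf'):
      # Expected values and must-not-be-empty properties for a kerberized core-site.xml
      expectations = build_expectations(
          'core-site',
          {'hadoop.security.authentication': 'kerberos',
           'hadoop.security.authorization': 'true'},
          ['hadoop.security.auth_to_local'],
          None)

      # Read what is actually configured on this host
      actual = get_params_from_filesystem(conf_dir, {'core-site.xml': FILE_TYPE_XML})

      # An empty dict means every expectation was satisfied; the real method then
      # verifies the keytab with a cached kinit before reporting SECURED_KERBEROS.
      return validate_security_config_properties(actual, expectations)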

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/datanode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/datanode_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/datanode_upgrade.py
new file mode 100755
index 0000000..529ca4438
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/datanode_upgrade.py
@@ -0,0 +1,114 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core import shell
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.decorator import retry
+
+
+def pre_upgrade_shutdown():
+  """
+  Runs the "shutdownDatanode {ipc_address} upgrade" command to shut down the
+  DataNode in preparation for an upgrade. This will then periodically check
+  "getDatanodeInfo" to ensure the DataNode has shut down correctly.
+  This function will obtain the Kerberos ticket if security is enabled.
+  :return:
+  """
+  import params
+
+  Logger.info('DataNode executing "shutdownDatanode" command in preparation for upgrade...')
+  if params.security_enabled:
+    Execute(params.dn_kinit_cmd, user = params.hdfs_user)
+
+  command = format('hdfs dfsadmin -shutdownDatanode {dfs_dn_ipc_address} upgrade')
+  Execute(command, user=params.hdfs_user, tries=1 )
+
+  # verify that the datanode is down
+  _check_datanode_shutdown()
+
+
+def post_upgrade_check():
+  """
+  Verifies that the DataNode has rejoined the cluster. This function will
+  obtain the Kerberos ticket if security is enabled.
+  :return:
+  """
+  import params
+
+  Logger.info("Checking that the DataNode has rejoined the cluster after upgrade...")
+  if params.security_enabled:
+    Execute(params.dn_kinit_cmd,user = params.hdfs_user)
+
+  # verify that the datanode has started and rejoined the HDFS cluster
+  _check_datanode_startup()
+
+
+@retry(times=12, sleep_time=10, err_class=Fail)
+def _check_datanode_shutdown():
+  """
+  Checks that a DataNode is down by running "hdfs dfsadmin -getDatanodeInfo"
+  several times, pausing in between runs. Once the DataNode stops responding
+  this method will return, otherwise it will raise a Fail(...) and retry
+  automatically.
+  :return:
+  """
+  import params
+
+  command = format('hdfs dfsadmin -getDatanodeInfo {dfs_dn_ipc_address}')
+
+  try:
+    Execute(command, user=params.hdfs_user, tries=1)
+  except:
+    Logger.info("DataNode has successfully shutdown for upgrade.")
+    return
+
+  Logger.info("DataNode has not shutdown.")
+  raise Fail('DataNode has not shutdown.')
+
+
+@retry(times=12, sleep_time=10, err_class=Fail)
+def _check_datanode_startup():
+  """
+  Checks that a DataNode is reported as being alive via the
+  "hdfs dfsadmin -report -live" command. Once the DataNode is found to be
+  alive this method will return, otherwise it will raise a Fail(...) and retry
+  automatically.
+  :return:
+  """
+  import params
+
+  try:
+    # 'su - hdfs -c "hdfs dfsadmin -report -live"'
+    command = 'hdfs dfsadmin -report -live'
+    return_code, hdfs_output = shell.call(command, user=params.hdfs_user)
+  except:
+    raise Fail('Unable to determine if the DataNode has started after upgrade.')
+
+  if return_code == 0:
+    if params.hostname.lower() in hdfs_output.lower():
+      Logger.info("DataNode {0} reports that it has rejoined the cluster.".format(params.hostname))
+      return
+    else:
+      raise Fail("DataNode {0} was not found in the list of live DataNodes".format(params.hostname))
+
+  # return_code is not 0, fail
+  raise Fail("Unable to determine if the DataNode has started after upgrade (result code {0})".format(str(return_code)))

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs.py
new file mode 100755
index 0000000..002b87d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs.py
@@ -0,0 +1,129 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+import os
+
+
+def hdfs(name=None):
+  import params
+
+  if params.create_lib_snappy_symlinks:
+    install_snappy()
+
+  # On some OSes this directory may not exist, so create it before placing files there
+  Directory(params.limits_conf_dir,
+            create_parents=True,
+            owner='root',
+            group='root'
+  )
+
+  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hdfs.conf.j2")
+  )
+
+  if params.security_enabled:
+    tc_mode = 0644
+    tc_owner = "root"
+  else:
+    tc_mode = None
+    tc_owner = params.hdfs_user
+
+  if "hadoop-policy" in params.config['configurations']:
+    XmlConfig("hadoop-policy.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['hadoop-policy'],
+              configuration_attributes=params.config['configuration_attributes']['hadoop-policy'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  if "ssl-client" in params.config['configurations']:
+    XmlConfig("ssl-client.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['ssl-client'],
+              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+    Directory(params.hadoop_conf_secure_dir,
+              create_parents=True,
+              owner='root',
+              group=params.user_group,
+              cd_access='a',
+              )
+
+    XmlConfig("ssl-client.xml",
+              conf_dir=params.hadoop_conf_secure_dir,
+              configurations=params.config['configurations']['ssl-client'],
+              configuration_attributes=params.config['configuration_attributes']['ssl-client'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  if "ssl-server" in params.config['configurations']:
+    XmlConfig("ssl-server.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['ssl-server'],
+              configuration_attributes=params.config['configuration_attributes']['ssl-server'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  File(os.path.join(params.hadoop_conf_dir, 'slaves'),
+       owner=tc_owner,
+       content=Template("slaves.j2")
+  )
+
+
+def install_snappy():
+  import params
+  Directory([params.so_target_dir_x86, params.so_target_dir_x64],
+            create_parents=True,
+  )
+  Link(params.so_target_x86,
+       to=params.so_src_x86,
+  )
+  Link(params.so_target_x64,
+       to=params.so_src_x64,
+  )
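
Most of hdfs() above is declarative: each XmlConfig resource renders one *-site.xml under the Hadoop conf dir from the dictionaries Ambari supplies in params.config['configurations']. A sketch of a single such declaration in isolation; like the function above, it has to be evaluated inside the agent's resource_management environment, and the dictionary values and paths here are stand-ins:

    from resource_management import *

    # Stand-ins for params.config['configurations']['hdfs-site'] and its attributes;
    # in a real run these come from the command JSON the agent receives.
    hdfs_site = {'dfs.replication': '3'}
    hdfs_site_attrs = {}

    XmlConfig("hdfs-site.xml",
              conf_dir='/etc/hadoop/conf',          # illustrative path
              configurations=hdfs_site,
              configuration_attributes=hdfs_site_attrs,
              owner='hdfs',
              group='hadoop',
              mode=0644)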

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_client.py
new file mode 100755
index 0000000..39cd1f4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_client.py
@@ -0,0 +1,112 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from hdfs import hdfs
+from utils import service
+
+
+class HdfsClient(Script):
+
+  def get_component_name(self):
+    return "hadoop-client"
+
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-client", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def config(self, env):
+    import params
+    hdfs()
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+    hdfs_expectations = {}
+    hdfs_expectations.update(core_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                   {'core-site.xml': FILE_TYPE_XML})
+
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+      if not result_issues: # If all validations passed successfully
+        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
+          try:
+            cached_kinit_executor(status_params.kinit_path_local,
+                       status_params.hdfs_user,
+                       status_params.hdfs_user_keytab,
+                       status_params.hdfs_user_principal,
+                       status_params.hostname,
+                       status_params.tmp_dir)
+            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+          except Exception as e:
+            self.put_structured_out({"securityState": "ERROR"})
+            self.put_structured_out({"securityStateErrorInfo": str(e)})
+        else:
+          self.put_structured_out({"securityIssuesFound": "hdfs principal and/or keytab file is not specified"})
+          self.put_structured_out({"securityState": "UNSECURED"})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+if __name__ == "__main__":
+  HdfsClient().execute()
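
The pre_upgrade_restart() hook above uses the same version gate as the other components in this stack: only when the target stack version is 4.0.0.0 or newer are the conf and binary symlinks repointed. The gate on its own, with the component name and version string as placeholders:

    from resource_management.libraries.functions import conf_select, stack_select
    from resource_management.libraries.functions.version import compare_versions, format_stack_version

    def select_for_upgrade(stack_name, version, component="hadoop-client"):
      # version is the target stack version Ambari passes in, e.g. "4.1.0.0-123" (illustrative)
      if version and compare_versions(format_stack_version(version), '4.0.0.0') >= 0:
        conf_select.select(stack_name, "hadoop", version)
        stack_select.select(component, version)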

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_datanode.py
new file mode 100755
index 0000000..420e556
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_datanode.py
@@ -0,0 +1,75 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+#from resource_management.libraries.functions.dfs_datanode_helper import handle_dfs_data_dir
+from resource_management.libraries.functions.mounted_dirs_helper import handle_mounted_dirs
+from utils import service
+
+
+def create_dirs(data_dir):
+  """
+  :param data_dir: The directory to create
+  """
+  import params
+  Directory(data_dir,
+            create_parents=True,
+            cd_access="a",
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            ignore_failures=True
+  )
+
+
+def datanode(action=None):
+  import params
+  if action == "configure":
+    Directory(params.dfs_domain_socket_dir,
+              create_parents=True,
+              mode=0751,
+              owner=params.hdfs_user,
+              group=params.user_group)
+
+    # handle_mounted_dirs ensures that we don't create dfs data dirs which are temporarily unavailable (unmounted) and are intended to reside on a different mount.
+    data_dir_to_mount_file_content = handle_mounted_dirs(create_dirs, params.dfs_data_dirs, params.data_dir_mount_file, params)
+    # create a history file used by handle_mounted_dirs
+    File(params.data_dir_mount_file,
+         owner=params.hdfs_user,
+         group=params.user_group,
+         mode=0644,
+         content=data_dir_to_mount_file_content
+    )
+  elif action == "start" or action == "stop":
+    Directory(params.hadoop_pid_dir_prefix,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
+  elif action == "status":
+    import status_params
+    check_process_status(status_params.datanode_pid_file)
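
The configure branch above delegates per-directory creation to handle_mounted_dirs, passing create_dirs as a callback: the helper is expected to invoke the callback only for data dirs whose mount point is actually available, and to return the text persisted in the mount-history file. A simplified, purely illustrative stand-in for that contract (not the library implementation; the history-file format shown is assumed):

    def handle_mounted_dirs_sketch(create_dir_fn, dirs_csv, history_file, params):
      """Call the callback for each data dir and return the history-file content."""
      lines = []
      for data_dir in dirs_csv.split(","):
        data_dir = data_dir.strip()
        # The real helper also skips dirs whose expected mount point is not
        # currently mounted; that check is omitted here.
        create_dir_fn(data_dir)
        lines.append("%s,%s" % (data_dir, "/"))  # dir,mount-point pairs (assumed format)
      return "\n".join(lines)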

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_namenode.py
new file mode 100755
index 0000000..f37494c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_namenode.py
@@ -0,0 +1,483 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os.path
+import time
+
+from resource_management import *
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions import Direction
+
+from resource_management.core.shell import as_user
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from resource_management.core.source import Template
+from resource_management.core.resources.system import File, Execute, Directory
+from resource_management.core.resources.service import Service
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
+from utils import get_dfsadmin_base_command
+from utils import service, safe_zkfc_op, is_previous_fs_image
+
+if OSCheck.is_windows_family():
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def namenode(action=None, do_format=True, upgrade_type=None, env=None):
+  import params
+  # we need this directory to be present before any action (HA manual steps for
+  # the additional NameNode)
+  if action == "configure":
+    create_name_dirs(params.dfs_name_dir)
+
+  if action == "start":
+    if do_format:
+      format_namenode()
+      pass
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+    Directory(params.hadoop_pid_dir_prefix,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+    if params.dfs_ha_enabled and \
+      params.dfs_ha_namenode_standby is not None and \
+      params.hostname == params.dfs_ha_namenode_standby:
+        # if the current host is the standby NameNode in an HA deployment
+        # run the bootstrap command, to start the NameNode in standby mode
+        # this requires that the active NameNode is already up and running,
+        # so this execute should be re-tried upon failure, up to a timeout
+        success = bootstrap_standby_namenode(params)
+        if not success:
+          raise Fail("Could not bootstrap standby namenode")
+
+    if upgrade_type == "rolling":
+      # Must start Zookeeper Failover Controller if it exists on this host because it could have been killed in order to initiate the failover.
+      safe_zkfc_op(action, env)
+
+    #options = "-rollingUpgrade started" if rolling_restart else ""
+    options = ""
+    if upgrade_type == "rolling":
+      if params.upgrade_direction == Direction.UPGRADE:
+        options = "-rollingUpgrade started"
+      elif params.upgrade_direction == Direction.DOWNGRADE:
+        options = "-rollingUpgrade downgrade"
+    elif upgrade_type == "nonrolling":
+      is_previous_image_dir = is_previous_fs_image()
+      Logger.info(format("Previous file system image dir present is {is_previous_image_dir}"))
+
+      if params.upgrade_direction == Direction.UPGRADE:
+        options = "-rollingUpgrade started"
+      elif params.upgrade_direction == Direction.DOWNGRADE:
+        options = "-rollingUpgrade downgrade"
+
+    Logger.info(format("Option for start command: {options}"))
+
+    service(
+      action="start",
+      name="namenode",
+      user=params.hdfs_user,
+      options=options,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
+
+
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+              user = params.hdfs_user)
+
+    if params.dfs_ha_enabled:
+      is_active_namenode_cmd = as_user(format("hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})
+
+    else:
+      is_active_namenode_cmd = True
+
+    # During NonRolling Upgrade, both NameNodes are initially down,
+    # so no point in checking if this is the active or standby.
+    if upgrade_type == "nonrolling":
+      is_active_namenode_cmd = False
+
+    # ___Scenario___________|_Expected safemode state__|_Wait for safemode OFF____|
+    # no-HA                 | ON -> OFF                | Yes                      |
+    # HA and active         | ON -> OFF                | Yes                      |
+    # HA and standby        | no change                | no check                 |
+    # RU with HA on active  | ON -> OFF                | Yes                      |
+    # RU with HA on standby | ON -> OFF                | Yes                      |
+    # EU with HA on active  | no change                | no check                 |
+    # EU with HA on standby | no change                | no check                 |
+    # EU non-HA             | no change                | no check                 |
+
+    check_for_safemode_off = False
+    msg = ""
+    if params.dfs_ha_enabled:
+      if upgrade_type is not None:
+        check_for_safemode_off = True
+        msg = "Must wait to leave safemode since High Availability is enabled during a Stack Upgrade"
+      else:
+        Logger.info("Wait for NameNode to become active.")
+        if is_active_namenode(): # active
+          check_for_safemode_off = True
+          msg = "Must wait to leave safemode since High Availability is enabled and this is the Active NameNode."
+        else:
+          msg = "Will remain in the current safemode state."
+    else:
+      msg = "Must wait to leave safemode since High Availability is not enabled."
+      check_for_safemode_off = True
+
+    Logger.info(msg)
+
+    # During a NonRolling (aka Express Upgrade), stay in safemode since the DataNodes are down.
+    stay_in_safe_mode = False
+    if upgrade_type == "nonrolling":
+      stay_in_safe_mode = True
+
+    if check_for_safemode_off:
+      Logger.info("Stay in safe mode: {0}".format(stay_in_safe_mode))
+      if not stay_in_safe_mode:
+        wait_for_safemode_off()
+
+    # Always run this on non-HA, or active NameNode during HA.
+    create_hdfs_directories(is_active_namenode_cmd)
+
+    '''if params.dfs_ha_enabled:
+      dfs_check_nn_status_cmd = as_user(format("hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})
+    else:
+      dfs_check_nn_status_cmd = None
+
+    namenode_safe_mode_off = format("hdfs dfsadmin -fs {namenode_address} -safemode get | grep 'Safe mode is OFF'")
+
+    # If HA is enabled and it is in standby, then stay in safemode, otherwise, leave safemode.
+    leave_safe_mode = True
+    if dfs_check_nn_status_cmd is not None:
+      code, out = shell.call(dfs_check_nn_status_cmd) # If active NN, code will be 0
+      if code != 0:
+        leave_safe_mode = False
+
+    if leave_safe_mode:
+      # First check if Namenode is not in 'safemode OFF' (equivalent to safemode ON), if so, then leave it
+      code, out = shell.call(namenode_safe_mode_off)
+      if code != 0:
+        leave_safe_mode_cmd = format("hdfs --config {hadoop_conf_dir} dfsadmin -fs {namenode_address} -safemode leave")
+        Execute(leave_safe_mode_cmd,
+                tries=10,
+                try_sleep=10,
+                user=params.hdfs_user,
+                path=[params.hadoop_bin_dir],
+        )
+
+    # Verify if Namenode should be in safemode OFF
+    Execute(namenode_safe_mode_off,
+            tries=40,
+            try_sleep=10,
+            path=[params.hadoop_bin_dir],
+            user=params.hdfs_user,
+            only_if=dfs_check_nn_status_cmd #skip when HA not active
+    )
+    create_hdfs_directories(dfs_check_nn_status_cmd)'''
+
+  if action == "stop":
+    service(
+      action="stop", name="namenode",
+      user=params.hdfs_user
+    )
+
+  if action == "decommission":
+    decommission()
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def namenode(action=None, do_format=True, upgrade_type=None, env=None):
+  if action == "configure":
+    pass
+  elif action == "start":
+    import params
+    #TODO: Replace with format_namenode()
+    namenode_format_marker = os.path.join(params.hadoop_conf_dir,"NN_FORMATTED")
+    if not os.path.exists(namenode_format_marker):
+      hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
+      Execute("%s namenode -format" % (hadoop_cmd))
+      open(namenode_format_marker, 'a').close()
+    Service(params.namenode_win_service_name, action=action)
+  elif action == "stop":
+    import params
+    Service(params.namenode_win_service_name, action=action)
+  elif action == "status":
+    import status_params
+    check_windows_service_status(status_params.namenode_win_service_name)
+  elif action == "decommission":
+    decommission()
+
+def create_name_dirs(directories):
+  import params
+
+  dirs = directories.split(",")
+  Directory(dirs,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            create_parents=True,
+            cd_access="a",
+  )
+
+
+def create_hdfs_directories(check):
+  import params
+
+  params.HdfsResource("/tmp",
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.hdfs_user,
+                       mode=0777,
+                       only_if=check
+  )
+  params.HdfsResource(params.smoke_hdfs_user_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.smoke_user,
+                       mode=params.smoke_hdfs_user_mode,
+                       only_if=check
+  )
+  params.HdfsResource(None,
+                      action="execute",
+                      only_if=check #skip creation when HA not active
+  )
+
+def format_namenode(force=None):
+  import params
+
+  old_mark_dir = params.namenode_formatted_old_mark_dirs
+  mark_dir = params.namenode_formatted_mark_dirs
+  dfs_name_dir = params.dfs_name_dir
+  hdfs_user = params.hdfs_user
+  hadoop_conf_dir = params.hadoop_conf_dir
+
+  if not params.dfs_ha_enabled:
+    if force:
+      ExecuteHadoop('namenode -format',
+                    kinit_override=True,
+                    bin_dir=params.hadoop_bin_dir,
+                    conf_dir=hadoop_conf_dir)
+    else:
+      if not is_namenode_formatted(params):
+        Execute(format("yes Y | hdfs --config {hadoop_conf_dir} namenode -format"),
+                user = params.hdfs_user,
+                path = [params.hadoop_bin_dir]
+        )
+        for m_dir in mark_dir:
+          Directory(m_dir,
+            create_parents = True
+          )
+  else:
+    if params.dfs_ha_namenode_active is not None and \
+       params.hostname == params.dfs_ha_namenode_active:
+      # check and run the format command in the HA deployment scenario
+      # only format the "active" namenode in an HA deployment
+      if force:
+        ExecuteHadoop('namenode -format',
+                      kinit_override=True,
+                      bin_dir=params.hadoop_bin_dir,
+                      conf_dir=hadoop_conf_dir)
+      else:
+        if not is_namenode_formatted(params):
+          Execute(format("yes Y | hdfs --config {hadoop_conf_dir} namenode -format"),
+                  user = params.hdfs_user,
+                  path = [params.hadoop_bin_dir]
+          )
+          for m_dir in mark_dir:
+            Directory(m_dir,
+              create_parents = True
+            )
+
+def is_namenode_formatted(params):
+  old_mark_dirs = params.namenode_formatted_old_mark_dirs
+  mark_dirs = params.namenode_formatted_mark_dirs
+  nn_name_dirs = params.dfs_name_dir.split(',')
+  marked = False
+  # Check if name directories have been marked as formatted
+  for mark_dir in mark_dirs:
+    if os.path.isdir(mark_dir):
+      marked = True
+      print format("{mark_dir} exists. Namenode DFS already formatted")
+
+  # Ensure that all mark dirs created for all name directories
+  if marked:
+    for mark_dir in mark_dirs:
+      Directory(mark_dir,
+        create_parents = True
+      )
+    return marked
+
+  # Move all old format markers to new place
+  for old_mark_dir in old_mark_dirs:
+    if os.path.isdir(old_mark_dir):
+      for mark_dir in mark_dirs:
+        Execute(('cp', '-ar', old_mark_dir, mark_dir),
+                sudo = True
+        )
+        marked = True
+      Directory(old_mark_dir,
+        action = "delete"
+      )
+    elif os.path.isfile(old_mark_dir):
+      for mark_dir in mark_dirs:
+        Directory(mark_dir,
+                  create_parents = True,
+        )
+      Directory(old_mark_dir,
+        action = "delete"
+      )
+      marked = True
+
+  # Check if name dirs are not empty
+  for name_dir in nn_name_dirs:
+    try:
+      Execute(format("ls {name_dir} | wc -l  | grep -q ^0$"),
+      )
+      marked = False
+    except Exception:
+      marked = True
+      print format("ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs {nn_name_dirs}")
+      break
+
+  return marked
+
+def decommission():
+  import params
+
+  hdfs_user = params.hdfs_user
+  conf_dir = params.hadoop_conf_dir
+  user_group = params.user_group
+  nn_kinit_cmd = params.nn_kinit_cmd
+
+  File(params.exclude_file_path,
+       content=Template("exclude_hosts_list.j2"),
+       owner=hdfs_user,
+       group=user_group
+  )
+
+  if not params.update_exclude_file_only:
+    Execute(nn_kinit_cmd,
+            user=hdfs_user
+    )
+
+    if params.dfs_ha_enabled:
+      # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
+      # need to execute each command scoped to a particular namenode
+      nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
+    else:
+      nn_refresh_cmd = format('dfsadmin -refreshNodes')
+    ExecuteHadoop(nn_refresh_cmd,
+                  user=hdfs_user,
+                  conf_dir=conf_dir,
+                  kinit_override=True,
+                  bin_dir=params.hadoop_bin_dir)
+
+def bootstrap_standby_namenode(params):
+  try:
+    iterations = 50
+    bootstrap_cmd = "hdfs namenode -bootstrapStandby -nonInteractive"
+    Logger.info("Boostrapping standby namenode: %s" % (bootstrap_cmd))
+    for i in range(iterations):
+      Logger.info('Try %d out of %d' % (i+1, iterations))
+      code, out = shell.call(bootstrap_cmd, logoutput=False, user=params.hdfs_user)
+      if code == 0:
+        Logger.info("Standby namenode bootstrapped successfully")
+        return True
+      elif code == 5:
+        Logger.info("Standby namenode already bootstrapped")
+        return True
+      else:
+        Logger.warning('Bootstrap standby namenode failed with %d error code. Will retry' % (code))
+  except Exception as ex:
+    Logger.error('Bootstrap standby namenode threw an exception. Reason %s' %(str(ex)))
+  return False
+
+def is_active_namenode():
+  """
+  Checks if current NameNode is active. Waits up to 30 seconds. If other NameNode is active returns False.
+  :return: True if current NameNode is active, False otherwise
+  """
+  import params
+
+  if params.dfs_ha_enabled:
+    is_active_this_namenode_cmd = as_user(format("hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})
+    is_active_other_namenode_cmd = as_user(format("hdfs --config {hadoop_conf_dir} haadmin -getServiceState {other_namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})
+
+    for i in range(0, 5):
+      code, out = shell.call(is_active_this_namenode_cmd) # If active NN, code will be 0
+      if code == 0: # active
+        return True
+
+      code, out = shell.call(is_active_other_namenode_cmd) # If other NN is active, code will be 0
+      if code == 0: # other NN is active
+        return False
+
+      if i < 4: # Do not sleep after last iteration
+        time.sleep(6)
+
+    Logger.info("Active NameNode is not found.")
+    return False
+
+  else:
+    return True
+
+def wait_for_safemode_off(afterwait_sleep=0, execute_kinit=False):
+  """
+  During NonRolling (aka Express Upgrade), after starting NameNode, which is still in safemode, and then starting
+  all of the DataNodes, we need the NameNode to receive all of the block reports and leave safemode.
+  If HA is present, then this command will run individually on each NameNode, which checks for its own address.
+  """
+  import params
+
+  Logger.info("Wait to leafe safemode since must transition from ON to OFF.")
+
+  if params.security_enabled and execute_kinit:
+    kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}")
+    Execute(kinit_command, user=params.hdfs_user, logoutput=True)
+
+  try:
+    # Note, this fails if namenode_address isn't prefixed with "params."
+
+    dfsadmin_base_command = get_dfsadmin_base_command('hdfs', use_specific_namenode=True)
+    is_namenode_safe_mode_off = dfsadmin_base_command + " -safemode get | grep 'Safe mode is OFF'"
+
+    # Wait up to roughly 20 minutes (115 tries, 10 seconds apart)
+    Execute(is_namenode_safe_mode_off,
+            tries=115,
+            try_sleep=10,
+            user=params.hdfs_user,
+            logoutput=True
+            )
+
+    # Wait a bit more since YARN still depends on block reports coming in.
+    # Also saw intermittent errors with HBASE service check if it was done too soon.
+    time.sleep(afterwait_sleep)
+  except Fail:
+    Logger.error("NameNode is still in safemode, please be careful with commands that need safemode OFF.")

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_nfsgateway.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_nfsgateway.py
new file mode 100755
index 0000000..efebfc5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_nfsgateway.py
@@ -0,0 +1,72 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.core.resources import Directory
+from resource_management.core import shell
+from utils import service
+import subprocess,os
+
+# NFS GATEWAY is always started by root using jsvc due to rpcbind bugs
+# on Linux such as CentOS6.2. https://bugzilla.redhat.com/show_bug.cgi?id=731542
+
+def prepare_rpcbind():
+  Logger.info("check if native nfs server is running")
+  p, output = shell.call("pgrep nfsd")
+  if p == 0 :
+    Logger.info("native nfs server is running. shutting it down...")
+    # shutdown nfs
+    shell.call("service nfs stop")
+    shell.call("service nfs-kernel-server stop")
+    Logger.info("check if the native nfs server is down...")
+    p, output = shell.call("pgrep nfsd")
+    if p == 0 :
+      raise Fail("Failed to shutdown native nfs service")
+
+  Logger.info("check if rpcbind or portmap is running")
+  p, output = shell.call("pgrep rpcbind")
+  q, output = shell.call("pgrep portmap")
+
+  if p!=0 and q!=0 :
+    Logger.info("no portmap or rpcbind running. starting one...")
+    p, output = shell.call("service rpcbind start")
+    q, output = shell.call("service portmap start")
+    if p!=0 and q!=0 :
+      raise Fail("Failed to start rpcbind or portmap")
+
+  Logger.info("now we are ready to start nfs gateway")
+
+
+def nfsgateway(action=None, format=False):
+  import params
+
+  if action== "start":
+    prepare_rpcbind()
+
+  if action == "configure":
+    return
+  elif action == "start" or action == "stop":
+    service(
+      action=action,
+      name="nfs3",
+      user=params.root_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
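
prepare_rpcbind() above leans on the (exit_code, output) tuple returned by shell.call; a zero exit from pgrep means the process exists. The same probe-and-start idiom in isolation, with a hypothetical service name:

    from resource_management.core import shell

    def ensure_running(process_name='rpcbind', service_name='rpcbind'):
      code, _out = shell.call("pgrep %s" % process_name)
      if code != 0:
        # Not running yet; attempt to start it (init-script name is illustrative)
        code, _out = shell.call("service %s start" % service_name)
      return code == 0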

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_rebalance.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_rebalance.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_rebalance.py
new file mode 100755
index 0000000..aea6fce
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_rebalance.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import re
+
+class HdfsParser():
+  def __init__(self):
+    self.initialLine = None
+    self.state = None
+
+  def parseLine(self, line):
+    hdfsLine = HdfsLine()
+    type, matcher = hdfsLine.recognizeType(line)
+    if(type == HdfsLine.LineType.HeaderStart):
+      self.state = 'PROCESS_STARTED'
+    elif (type == HdfsLine.LineType.Progress):
+      self.state = 'PROGRESS'
+      hdfsLine.parseProgressLog(line, matcher)
+      if(self.initialLine == None): self.initialLine = hdfsLine
+
+      return hdfsLine
+    elif (type == HdfsLine.LineType.ProgressEnd):
+      self.state = 'PROCESS_FINISHED'
+    return None
+
+class HdfsLine():
+
+  class LineType:
+    HeaderStart, Progress, ProgressEnd, Unknown = range(4)
+
+
+  MEMORY_SUFFIX = ['B','KB','MB','GB','TB','PB','EB']
+  MEMORY_PATTERN = '(?P<memmult_%d>(?P<memory_%d>(\d+)(.|,)?(\d+)?) (?P<mult_%d>'+"|".join(MEMORY_SUFFIX)+'))'
+
+  HEADER_BEGIN_PATTERN = re.compile('Time Stamp\s+Iteration#\s+Bytes Already Moved\s+Bytes Left To Move\s+Bytes Being Moved')
+  PROGRESS_PATTERN = re.compile(
+                            "(?P<date>.*?)\s+" +
+                            "(?P<iteration>\d+)\s+" +
+                            MEMORY_PATTERN % (1,1,1) + "\s+" +
+                            MEMORY_PATTERN % (2,2,2) + "\s+" +
+                            MEMORY_PATTERN % (3,3,3)
+                            )
+  PROGRESS_END_PATTERN = re.compile('(The cluster is balanced. Exiting...)')
+
+  def __init__(self):
+    self.date = None
+    self.iteration = None
+    self.bytesAlreadyMoved = None
+    self.bytesLeftToMove = None
+    self.bytesBeingMoved = None
+    self.bytesAlreadyMovedStr = None
+    self.bytesLeftToMoveStr = None
+    self.bytesBeingMovedStr = None
+
+  def recognizeType(self, line):
+    for (type, pattern) in (
+                            (HdfsLine.LineType.HeaderStart, self.HEADER_BEGIN_PATTERN),
+                            (HdfsLine.LineType.Progress, self.PROGRESS_PATTERN),
+                            (HdfsLine.LineType.ProgressEnd, self.PROGRESS_END_PATTERN)
+                            ):
+      m = re.match(pattern, line)
+      if m:
+        return type, m
+    return HdfsLine.LineType.Unknown, None
+
+  def parseProgressLog(self, line, m):
+    '''
+    Parse the line of 'hdfs rebalancer' output. The example output being parsed:
+
+    Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
+    Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
+    Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
+
+    Throws AmbariException in case of parsing errors
+
+    '''
+    m = re.match(self.PROGRESS_PATTERN, line)
+    if m:
+      self.date = m.group('date')
+      self.iteration = int(m.group('iteration'))
+
+      self.bytesAlreadyMoved = self.parseMemory(m.group('memory_1'), m.group('mult_1'))
+      self.bytesLeftToMove = self.parseMemory(m.group('memory_2'), m.group('mult_2'))
+      self.bytesBeingMoved = self.parseMemory(m.group('memory_3'), m.group('mult_3'))
+
+      self.bytesAlreadyMovedStr = m.group('memmult_1')
+      self.bytesLeftToMoveStr = m.group('memmult_2')
+      self.bytesBeingMovedStr = m.group('memmult_3')
+    else:
+      raise AmbariException("Failed to parse line [%s]")
+
+  def parseMemory(self, memorySize, multiplier_type):
+    try:
+      factor = self.MEMORY_SUFFIX.index(multiplier_type)
+    except ValueError:
+      raise AmbariException("Failed to memory value [%s %s]" % (memorySize, multiplier_type))
+
+    return float(memorySize) * (1024 ** factor)
+  def toJson(self):
+    return {
+            'timeStamp' : self.date,
+            'iteration' : self.iteration,
+
+            'dataMoved': self.bytesAlreadyMovedStr,
+            'dataLeft' : self.bytesLeftToMoveStr,
+            'dataBeingMoved': self.bytesBeingMovedStr,
+
+            'bytesMoved': self.bytesAlreadyMoved,
+            'bytesLeft' : self.bytesLeftToMove,
+            'bytesBeingMoved': self.bytesBeingMoved,
+          }
+  def __str__(self):
+    return "[ date=%s,iteration=%d, bytesAlreadyMoved=%d, bytesLeftToMove=%d, bytesBeingMoved=%d]"%(self.date, self.iteration, self.bytesAlreadyMoved, self.bytesLeftToMove, self.bytesBeingMoved)
\ No newline at end of file
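
HdfsParser and HdfsLine above drive rebalance progress reporting: the balancer's stdout is fed in line by line, and each Progress line becomes an HdfsLine whose toJson() payload can be surfaced as structured output. A usage sketch against the sample line quoted in the parseProgressLog docstring, assuming this module is importable as hdfs_rebalance:

    from hdfs_rebalance import HdfsParser

    parser = HdfsParser()
    sample = "Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB"
    progress = parser.parseLine(sample)
    if progress is not None:
      # e.g. {'timeStamp': 'Jul 28, 2014 5:01:49 PM', 'iteration': 0, 'dataLeft': '5.74 GB', ...}
      print progress.toJson()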

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_snamenode.py
new file mode 100755
index 0000000..3135924
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/hdfs_snamenode.py
@@ -0,0 +1,50 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+def snamenode(action=None, format=False):
+  import params
+
+  if action == "configure":
+    for fs_checkpoint_dir in params.fs_checkpoint_dirs:
+      Directory(fs_checkpoint_dir,
+                create_parents=True,
+                cd_access="a",
+                mode=0755,
+                owner=params.hdfs_user,
+                group=params.user_group)
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group)
+  elif action == "start" or action == "stop":
+    Directory(params.hadoop_pid_dir_prefix,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/journalnode.py
new file mode 100755
index 0000000..1bbbd50
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/journalnode.py
@@ -0,0 +1,169 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, \
+  format_stack_version
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+
+from utils import service
+from hdfs import hdfs
+import journalnode_upgrade
+
+
+class JournalNode(Script):
+
+  def get_component_name(self):
+    return "hadoop-hdfs-journalnode"
+
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hadoop-hdfs-journalnode", params.version)
+      #Execute(format("stack-select set hadoop-hdfs-journalnode {version}"))
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    Directory(params.hadoop_pid_dir_prefix,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    service(
+      action="start", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def post_upgrade_restart(self, env, upgrade_type=None):
+    if upgrade_type == "nonrolling":
+      return
+
+    Logger.info("Executing Stack Upgrade post-restart")
+    import params
+    env.set_params(params)
+    journalnode_upgrade.post_upgrade_check()
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    service(
+      action="stop", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def configure(self, env):
+    import params
+
+    Directory(params.jn_edits_dir,
+              create_parents=True,
+              cd_access="a",
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    env.set_params(params)
+    hdfs()
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.journalnode_pid_file)
+
+  def security_status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    props_value_check = {"hadoop.security.authentication": "kerberos",
+                         "hadoop.security.authorization": "true"}
+    props_empty_check = ["hadoop.security.auth_to_local"]
+    props_read_check = None
+    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
+                                                props_read_check)
+
+    props_value_check = None
+    props_empty_check = ['dfs.journalnode.keytab.file',
+                         'dfs.journalnode.kerberos.principal']
+    props_read_check = ['dfs.journalnode.keytab.file']
+    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
+                                                props_read_check)
+
+    hdfs_expectations = {}
+    hdfs_expectations.update(hdfs_site_expectations)
+    hdfs_expectations.update(core_site_expectations)
+
+    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
+                                                 {'core-site.xml': FILE_TYPE_XML})
+    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
+        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
+      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
+      if not result_issues:  # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if ('hdfs-site' not in security_params or
+                  'dfs.journalnode.kerberos.keytab.file' not in security_params['hdfs-site'] or
+                  'dfs.journalnode.kerberos.principal' not in security_params['hdfs-site']):
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out(
+              {"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.hdfs_user,
+                                security_params['hdfs-site']['dfs.journalnode.kerberos.keytab.file'],
+                                security_params['hdfs-site']['dfs.journalnode.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+if __name__ == "__main__":
+  JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/journalnode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/journalnode_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/journalnode_upgrade.py
new file mode 100755
index 0000000..91d93a6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/package/scripts/journalnode_upgrade.py
@@ -0,0 +1,136 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import time
+
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.default import default
+from resource_management.core.exceptions import Fail
+from utils import get_jmx_data
+from namenode_ha_state import NAMENODE_STATE, NamenodeHAState
+
+
+def post_upgrade_check():
+  """
+  Ensure all journal nodes are up and quorum is established
+  :return:
+  """
+  import params
+  Logger.info("Ensuring Journalnode quorum is established")
+
+  if params.security_enabled:
+    Execute(params.jn_kinit_cmd, user=params.hdfs_user)
+
+  time.sleep(5)
+  hdfs_roll_edits()
+  time.sleep(5)
+
+  all_journal_node_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+
+  if len(all_journal_node_hosts) < 3:
+    raise Fail("Need at least 3 Journalnodes to maintain a quorum")
+
+  try:
+    namenode_ha = NamenodeHAState()
+  except ValueError as err:
+    raise Fail("Could not retrieve Namenode HA addresses. Error: " + str(err))
+
+  Logger.info(str(namenode_ha))
+  nn_address = namenode_ha.get_address(NAMENODE_STATE.ACTIVE)
+
+  nn_data = get_jmx_data(nn_address, 'org.apache.hadoop.hdfs.server.namenode.FSNamesystem', 'JournalTransactionInfo',
+                         namenode_ha.is_encrypted())
+  if not nn_data:
+    raise Fail("Could not retrieve JournalTransactionInfo from JMX")
+
+  try:
+    last_txn_id = int(nn_data['LastAppliedOrWrittenTxId'])
+    success = ensure_jns_have_new_txn(all_journal_node_hosts, last_txn_id)
+
+    if not success:
+      raise Fail("Could not ensure that all Journal nodes have a new log transaction id")
+  except KeyError:
+    raise Fail("JournalTransactionInfo does not have key LastAppliedOrWrittenTxId from JMX info")
+
+
+def hdfs_roll_edits():
+  """
+  HDFS_CLIENT needs to be a dependency of JOURNALNODE
+  Roll the logs so that Namenode will be able to connect to the Journalnode.
+  Must kinit before calling this command.
+  """
+  import params
+
+  # TODO: this will need to be documented since existing IOP 4.0 clusters will need HDFS_CLIENT on all JOURNALNODE hosts
+  command = 'hdfs dfsadmin -rollEdits'
+  Execute(command, user=params.hdfs_user, tries=1)
+
+
+def ensure_jns_have_new_txn(nodes, last_txn_id):
+  """
+  :param nodes: List of Journalnodes
+  :param last_txn_id: Integer of last transaction id
+  :return: Return true on success, false otherwise
+  """
+  import params
+
+  num_of_jns = len(nodes)
+  actual_txn_ids = {}
+  jns_updated = 0
+
+  if params.journalnode_address is None:
+    raise Fail("Could not retrieve Journal node address")
+
+  if params.journalnode_port is None:
+    raise Fail("Could not retrieve Journalnode port")
+
+  time_out_secs = 3 * 60
+  step_time_secs = 10
+  iterations = int(time_out_secs/step_time_secs)
+
+  protocol = "https" if params.https_only else "http"
+
+  Logger.info("Checking if all Journalnodes are updated.")
+  for i in range(iterations):
+    Logger.info('Try %d out of %d' % (i+1, iterations))
+    for node in nodes:
+      # if all JNS are updated break
+      if jns_updated == num_of_jns:
+        Logger.info("All journal nodes are updated")
+        return True
+
+      # JN already meets condition, skip it
+      if node in actual_txn_ids and actual_txn_ids[node] and actual_txn_ids[node] >= last_txn_id:
+        continue
+
+      url = '%s://%s:%s' % (protocol, node, params.journalnode_port)
+      data = get_jmx_data(url, 'Journal-', 'LastWrittenTxId')
+      if data:
+        actual_txn_ids[node] = int(data)
+        if actual_txn_ids[node] >= last_txn_id:
+          Logger.info("Journalnode %s has a higher transaction id: %s" % (node, str(data)))
+          jns_updated += 1
+        else:
+          Logger.info("Journalnode %s is still on transaction id: %s" % (node, str(data)))
+
+    Logger.info("Sleeping for %d secs" % step_time_secs)
+    time.sleep(step_time_secs)
+
+  return jns_updated == num_of_jns
\ No newline at end of file
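
The quorum check above boils down to polling each JournalNode's JMX endpoint until every node reports a transaction id at or beyond the NameNode's last applied id, or a timeout expires. A stripped-down sketch of the same poll-until-caught-up loop (fetch_txn_id is a placeholder assumption standing in for the get_jmx_data call):

    import time

    def wait_for_journalnodes(nodes, last_txn_id, fetch_txn_id,
                              timeout_secs=180, step_secs=10):
        """Return True once every node reports a txn id >= last_txn_id."""
        caught_up = set()
        for _ in range(int(timeout_secs / step_secs)):
            for node in nodes:
                if node in caught_up:
                    continue
                txn = fetch_txn_id(node)  # stands in for the JMX 'LastWrittenTxId' lookup
                if txn is not None and txn >= last_txn_id:
                    caught_up.add(node)
            if len(caught_up) == len(nodes):
                return True
            time.sleep(step_secs)
        return False

    # Usage with a fake fetcher that always reports transaction id 42:
    print(wait_for_journalnodes(["jn1", "jn2", "jn3"], 40, lambda node: 42))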


[34/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-site.xml
new file mode 100755
index 0000000..5f2bc18
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,1111 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>hive.cbo.enable</name>
+    <value>true</value>
+    <description>Flag to control enabling Cost Based Optimizations using Calcite framework.</description>
+  </property>
+
+  <property>
+    <name>hive.zookeeper.quorum</name>
+    <value>localhost:2181</value>
+    <description>
+      List of ZooKeeper servers to talk to. This is needed for: 1.
+      Read/write locks - when hive.lock.manager is set to
+      org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager,
+      2. When HiveServer2 supports service discovery via Zookeeper.
+    </description>
+    <value-attributes>
+      <type>multiLine</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.metastore.connect.retries</name>
+    <value>24</value>
+    <description>Number of retries while opening a connection to metastore</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.failure.retries</name>
+    <value>24</value>
+    <description>Number of retries upon failure of Thrift metastore calls</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.connect.retry.delay</name>
+    <value>5s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Number of seconds for the client to wait between consecutive connection attempts
+    </description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>60</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      MetaStore Client socket timeout in seconds
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapjoin.bucket.cache.size</name>
+    <value>10000</value>
+    <description>
+      How many values in each key in the map-joined table should be cached
+      in memory.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory</value>
+    <description>
+      The Hive client authorization manager class name. The user defined authorization class should implement
+      interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.cluster.delegation.token.store.class</name>
+    <value>org.apache.hadoop.hive.thrift.ZooKeeperTokenStore</value>
+    <description>The delegation token store implementation.
+      Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.</description>
+  </property>
+
+  <property>
+    <name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
+    <value>localhost:2181</value>
+    <description>The ZooKeeper token store connect string.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.support.dynamic.service.discovery</name>
+    <value>true</value>
+    <description>Whether HiveServer2 supports dynamic service discovery for its clients.
+      To support this, each instance of HiveServer2 currently uses ZooKeeper to register itself,
+      when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble: hive.zookeeper.quorum
+      in their connection string.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>fs.hdfs.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable HDFS filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>fs.file.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable local filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.scratchdir</name>
+    <value>/tmp/hive</value>
+    <description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/&lt;username&gt; is created, with ${hive.scratch.dir.permission}.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.submitviachild</name>
+    <value>false</value>
+    <description/>
+  </property>
+
+  <property>
+    <name>hive.exec.submit.local.task.via.child</name>
+    <value>true</value>
+    <description>
+      Determines whether local tasks (typically the mapjoin hashtable generation phase) run in
+      a separate JVM (true recommended) or not.
+      Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.compress.output</name>
+    <value>false</value>
+    <description>
+      This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed.
+      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.compress.intermediate</name>
+    <value>false</value>
+    <description>
+      This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed.
+      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.reducers.bytes.per.reducer</name>
+    <value>67108864</value>
+    <description>Size per reducer. The default is 256MB, i.e. if the input size is 1G, it will use 4 reducers.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.reducers.max</name>
+    <value>1009</value>
+    <description>
+      Maximum number of reducers that will be used. If the value specified in the configuration parameter mapred.reduce.tasks is
+      negative, Hive will use this one as the max number of reducers when automatically determining the number of reducers.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.pre.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>
+      Comma-separated list of pre-execution hooks to be invoked for each statement.
+      A pre-execution hook is specified as the name of a Java class which implements the
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.post.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>
+      Comma-separated list of post-execution hooks to be invoked for each statement.
+      A post-execution hook is specified as the name of a Java class which implements the
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.failure.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>
+      Comma-separated list of on-failure hooks to be invoked for each statement.
+      An on-failure hook is specified as the name of Java class which implements the
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.parallel</name>
+    <value>false</value>
+    <description>Whether to execute jobs in parallel</description>
+  </property>
+
+  <property>
+    <name>hive.exec.parallel.thread.number</name>
+    <value>8</value>
+    <description>How many jobs at most can be executed in parallel</description>
+  </property>
+
+  <property>
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>Whether speculative execution for reducers should be turned on. </description>
+  </property>
+
+  <property>
+    <name>hive.exec.dynamic.partition</name>
+    <value>true</value>
+    <description>Whether or not to allow dynamic partitions in DML/DDL.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.dynamic.partition.mode</name>
+    <value>nonstrict</value>
+    <description>
+      In strict mode, the user must specify at least one static partition
+      in case the user accidentally overwrites all partitions.
+      NonStrict allows all partitions of a table to be dynamic.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.max.dynamic.partitions</name>
+    <value>5000</value>
+    <description>Maximum number of dynamic partitions allowed to be created in total.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.max.dynamic.partitions.pernode</name>
+    <value>2000</value>
+    <description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.max.created.files</name>
+    <value>100000</value>
+    <description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://localhost:9083</value>
+    <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
+  </property>
+
+  <property require-input = "true">
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <display-name>Database Password</display-name>
+    <value></value>
+    <property-type>PASSWORD</property-type>
+    <description>password to use against metastore database</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <display-name>Database URL</display-name>
+    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.metastore.server.max.threads</name>
+    <value>100000</value>
+    <description>Maximum number of worker threads in the Thrift server's pool.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value>/etc/security/keytabs/hive.service.keytab</value>
+    <description>The path to the Kerberos Keytab file containing the metastore Thrift server's service principal.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.principal</name>
+    <value>hive/_HOST@EXAMPLE.COM</value>
+    <description>
+      The service principal for the metastore Thrift server.
+      The special string _HOST will be replaced automatically with the correct host name.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.cluster.delegation.token.store.zookeeper.znode</name>
+    <value>/hive/cluster/delegation</value>
+    <description>The root path for token store data.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.pre.event.listeners</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    <description>Pre-event listener classes to be loaded on the metastore side to run code
+      whenever databases, tables, and partitions are created, altered, or dropped.
+      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
+      if metastore-side authorization is desired.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.authorization.storage.checks</name>
+    <value>false</value>
+    <description>
+      Should the metastore do authorization checks against the underlying storage (usually hdfs)
+      for operations like drop-partition (disallow the drop-partition if the user in
+      question doesn't have permissions to delete the corresponding directory
+      on the storage).
+    </description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <display-name>JDBC Driver Class</display-name>
+    <value>com.mysql.jdbc.Driver</value>
+    <description>Driver class name for a JDBC metastore</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <display-name>Database Username</display-name>
+    <value>hive</value>
+    <description>Username to use against metastore database</description>
+    <value-attributes>
+      <type>db_user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.map.aggr</name>
+    <value>true</value>
+    <description>Whether to use map-side aggregation in Hive Group By queries</description>
+  </property>
+
+  <property>
+    <name>hive.smbjoin.cache.rows</name>
+    <value>10000</value>
+    <description>How many rows with the same key value should be cached in memory per smb joined table.</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr.hash.percentmemory</name>
+    <value>0.5</value>
+    <description>Portion of total memory to be used by map-side group aggregation hash table</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr.hash.force.flush.memory.threshold</name>
+    <value>0.9</value>
+    <description>
+      The max memory to be used by map-side group aggregation hash table.
+      If the memory usage is higher than this number, force to flush data
+    </description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr.hash.min.reduction</name>
+    <value>0.5</value>
+    <description>
+      Hash aggregation will be turned off if the ratio between hash  table size and input rows is bigger than this number.
+      Set to 1 to make sure hash aggregation is never turned off.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.merge.mapfiles</name>
+    <value>true</value>
+    <description>Merge small files at the end of a map-only job</description>
+  </property>
+
+  <property>
+    <name>hive.merge.mapredfiles</name>
+    <value>false</value>
+    <description>Merge small files at the end of a map-reduce job</description>
+  </property>
+
+  <property>
+    <name>hive.merge.size.per.task</name>
+    <value>256000000</value>
+    <description>Size of merged files at the end of the job</description>
+  </property>
+
+  <property>
+    <name>hive.merge.smallfiles.avgsize</name>
+    <value>16000000</value>
+    <description>
+      When the average output file size of a job is less than this number, Hive will start an additional
+      map-reduce job to merge the output files into bigger files. This is only done for map-only jobs
+      if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.merge.rcfile.block.level</name>
+    <value>true</value>
+    <description/>
+  </property>
+
+  <property>
+    <name>hive.merge.orcfile.stripe.level</name>
+    <value>true</value>
+    <description>
+      When hive.merge.mapfiles or hive.merge.mapredfiles is enabled while writing a
+      table with ORC file format, enabling this config will do stripe level fast merge
+      for small ORC files. Note that enabling this config will not honor padding tolerance
+      config (hive.exec.orc.block.padding.tolerance).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.orc.default.stripe.size</name>
+    <value>67108864</value>
+    <description>Define the default ORC stripe size</description>
+  </property>
+
+  <property>
+    <name>hive.exec.orc.default.compress</name>
+    <value>ZLIB</value>
+    <description>Define the default compression codec for ORC file</description>
+  </property>
+
+  <property>
+    <name>hive.exec.orc.compression.strategy</name>
+    <value>SPEED</value>
+    <description>
+      Expects one of [speed, compression].
+      Define the compression strategy to use while writing data.
+      This changes the compression level of higher level compression codec (like ZLIB).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.orc.splits.include.file.footer</name>
+    <value>false</value>
+    <description>
+      If turned on splits generated by orc will include metadata about the stripes in the file. This
+      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.orc.compute.splits.num.threads</name>
+    <value>10</value>
+    <description>How many threads orc should use to create splits in parallel.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization about converting common join into mapjoin based on the input file size</description>
+  </property>
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask</name>
+    <value>true</value>
+    <description>
+      Whether Hive enables the optimization about converting common join into mapjoin based on the input file size.
+      If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the
+      specified size, the join is directly converted to a mapjoin (there is no conditional task).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.limit.optimize.enable</name>
+    <value>true</value>
+    <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.bucketing</name>
+    <value>true</value>
+    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+  </property>
+  <property>
+    <name>hive.enforce.sorting</name>
+    <value>true</value>
+    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+  </property>
+
+
+  <property>
+    <name>hive.enforce.sortmergebucketmapjoin</name>
+    <value>true</value>
+    <description>If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not ?</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join</name>
+    <value>true</value>
+    <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
+    <value>false</value>
+    <description>
+      If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join,
+      this parameter decides whether each table should be tried as a big table, and effectively a map-join should be
+      tried. That would create a conditional task with n+1 children for a n-way join (1 child for each table as the
+      big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a
+      sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted
+      and bucketed table with few files (say 10 files) is being joined with a very small sorted and bucketed table
+      with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster
+      if the complete small table can fit in memory, and a map-join can be performed.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.constant.propagation</name>
+    <value>true</value>
+    <description>Whether to enable constant propagation optimizer</description>
+  </property>
+  <property>
+    <name>hive.optimize.metadataonly</name>
+    <value>true</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.optimize.null.scan</name>
+    <value>true</value>
+    <description>Don't scan relations which are guaranteed not to generate any rows</description>
+  </property>
+
+
+  <property>
+    <name>hive.optimize.bucketmapjoin</name>
+    <value>true</value>
+    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
+      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
+      this parameter as true.</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication</name>
+    <value>true</value>
+    <description>
+      Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
+      This should always be set to true. Since it is a new feature, it has been made configurable.
+    </description>
+  </property>
+  <property>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>4</value>
+    <description>
+      Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.
+      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single-MR job.
+      The optimization will be automatically disabled if the number of reducers would be less than the specified value.
+    </description>
+  </property>
+  <property>
+    <name>hive.optimize.sort.dynamic.partition</name>
+    <value>false</value>
+    <description>
+      When enabled dynamic partitioning column will be globally sorted.
+      This way we can keep only one record writer open for each partition value
+      in the reducer thereby reducing the memory pressure on reducers.
+    </description>
+  </property>
+  <property>
+    <name>hive.stats.autogather</name>
+    <value>true</value>
+    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
+  </property>
+  <property>
+    <name>hive.stats.dbclass</name>
+    <value>fs</value>
+    <description>
+      Expects one of the pattern in [jdbc(:.*), hbase, counter, custom, fs].
+      The storage that stores temporary Hive statistics. Currently, jdbc, hbase, counter and custom type are supported.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.stats.fetch.partition.stats</name>
+    <value>true</value>
+    <description>
+      Annotation of operator tree with statistics information requires partition level basic
+      statistics like number of rows, data size and file size. Partition statistics are fetched from
+      metastore. Fetching partition statistics for each needed partition can be expensive when the
+      number of partitions is high. This flag can be used to disable fetching of partition statistics
+      from metastore. When this flag is disabled, Hive will make calls to filesystem to get file sizes
+      and will estimate the number of rows from row schema.
+    </description>
+  </property>
+  <property>
+    <name>hive.stats.fetch.column.stats</name>
+    <value>false</value>
+    <description>
+      Annotation of operator tree with statistics information requires column statistics.
+      Column statistics are fetched from metastore. Fetching column statistics for each needed column
+      can be expensive when the number of columns is high. This flag can be used to disable fetching
+      of column statistics from metastore.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.zookeeper.client.port</name>
+    <value>2181</value>
+    <description>The port of ZooKeeper servers to talk to. If the list of Zookeeper servers specified in hive.zookeeper.quorum does not contain port numbers, this value is used.</description>
+  </property>
+
+  <property>
+    <name>hive.zookeeper.namespace</name>
+    <value>hive_zookeeper_namespace</value>
+    <description>The parent node under which all ZooKeeper nodes are created.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.manager</name>
+    <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+    <description/>
+  </property>
+
+  <property>
+    <name>hive.txn.max.open.batch</name>
+    <value>1000</value>
+    <description>
+      Maximum number of transactions that can be fetched in one call to open_txns().
+      Increasing this will decrease the number of delta files created when
+      streaming data into Hive.  But it will also increase the number of
+      open transactions at any given time, possibly impacting read performance.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.support.concurrency</name>
+    <value>false</value>
+    <description>
+      Support concurrency and use locks, needed for Transactions. Requires Zookeeper.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.cli.print.header</name>
+    <value>false</value>
+    <description>
+      Whether to print the names of the columns in query output.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.compactor.worker.timeout</name>
+    <value>86400L</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Time before a given compaction in working state is declared a failure
+      and returned to the initiated state.
+    </description>
+  </property>
+  <property>
+    <name>hive.compactor.check.interval</name>
+    <value>300L</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Time between checks to see if any partitions need to be compacted.
+      This should be kept high because each check for compaction requires many calls against the NameNode.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.compactor.delta.pct.threshold</name>
+    <value>0.1f</value>
+    <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
+  </property>
+  <property>
+    <name>hive.fetch.task.conversion</name>
+    <value>more</value>
+    <description>
+      Expects one of [none, minimal, more].
+      Some select queries can be converted to single FETCH task minimizing latency.
+      Currently the query should be single sourced not having any subquery and should not have
+      any aggregations or distincts (which incurs RS), lateral views and joins.
+      0. none : disable hive.fetch.task.conversion
+      1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only
+      2. more    : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)
+    </description>
+  </property>
+  <property>
+    <name>hive.fetch.task.conversion.threshold</name>
+    <value>1073741824</value>
+    <description>
+      Input threshold for applying hive.fetch.task.conversion. If target table is native, input length
+      is calculated by summation of file lengths. If it's not native, storage handler for the table
+      can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.fetch.task.aggr</name>
+    <value>false</value>
+    <description>
+      Aggregation queries with no group-by clause (for example, select count(*) from src) execute
+      final aggregations in single reduce task. If this is set true, Hive delegates final aggregation
+      stage to fetch task, possibly decreasing the query time.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.enabled</name>
+    <value>false</value>
+    <description>enable or disable the Hive client authorization</description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>
+      hive client authenticator manager class name. The user defined authenticator should implement
+      interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly</value>
+    <description>
+      authorization manager class name to be used in the metastore for authorization.
+      The user defined authorization class should implement interface
+      org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.auth.reads</name>
+    <value>true</value>
+    <description>If this is true, metastore authorizer authorizes read actions on database, table</description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator</value>
+    <description>
+      authenticator manager class name to be used in the metastore for authentication.
+      The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.logging.operation.enabled</name>
+    <value>true</value>
+    <description>When true, HS2 will save operation logs</description>
+  </property>
+
+  <property>
+    <name>hive.server2.logging.operation.log.location</name>
+    <value>${java.io.tmpdir}/${user.name}/operation_logs</value>
+    <description>Top level directory where operation logs are stored if logging functionality is enabled</description>
+  </property>
+
+  <property>
+    <name>hive.server2.zookeeper.namespace</name>
+    <value>hiveserver2</value>
+    <description>The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.transport.mode</name>
+    <value>binary</value>
+    <description>
+      Expects one of [binary, http].
+      Transport mode of HiveServer2.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.http.port</name>
+    <value>10001</value>
+    <description>Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.http.path</name>
+    <value>cliservice</value>
+    <description>Path component of URL endpoint when in HTTP mode.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.port</name>
+    <display-name>HiveServer2 Port</display-name>
+    <value>10000</value>
+    <description>Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'binary'.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>int</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.sasl.qop</name>
+    <value>auth</value>
+    <description>
+      Expects one of [auth, auth-int, auth-conf].
+      Sasl QOP value; Set it to one of following values to enable higher levels of
+      protection for HiveServer2 communication with clients.
+      "auth" - authentication only (default)
+      "auth-int" - authentication plus integrity protection
+      "auth-conf" - authentication plus integrity and confidentiality protection
+      This is applicable only if HiveServer2 is configured to use Kerberos authentication.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.max.worker.threads</name>
+    <value>500</value>
+    <description>Maximum number of Thrift worker threads</description>
+  </property>
+
+  <property>
+    <name>hive.server2.allow.user.substitution</name>
+    <value>true</value>
+    <description>Allow alternate user to be specified as part of HiveServer2 open connection request.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.spnego.keytab</name>
+    <value>/etc/security/keytabs/spnego.service.keytab</value>
+    <description>
+      keytab file for SPNego principal, optional,
+      typical value would look like /etc/security/keytabs/spnego.service.keytab,
+      This keytab would be used by HiveServer2 when Kerberos security is enabled and
+      HTTP transport mode is used.
+      This needs to be set only if SPNEGO is to be used in authentication.
+      SPNego authentication would be honored only if valid
+      hive.server2.authentication.spnego.principal
+      and
+      hive.server2.authentication.spnego.keytab
+      are specified.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication</name>
+    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
+    <value>NONE</value>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.spnego.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+    <description>
+      SPNego service principal, optional,
+      typical value would look like HTTP/_HOST@EXAMPLE.COM
+      SPNego service principal would be used by HiveServer2 when Kerberos security is enabled
+      and HTTP transport mode is used.
+      This needs to be set only if SPNEGO is to be used in authentication.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+    <description>
+      Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
+      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
+      process runs as.
+    </description>
+  </property>
+  <property>
+    <name>hive.server2.table.type.mapping</name>
+    <value>CLASSIC</value>
+    <description>
+      Expects one of [classic, hive].
+      This setting reflects how HiveServer2 will report the table types for JDBC and other
+      client implementations that retrieve the available tables and supported table types
+      HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW
+      CLASSIC : More generic types like TABLE and VIEW
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.use.SSL</name>
+    <value>false</value>
+    <description/>
+  </property>
+  <property>
+    <name>hive.server2.keystore.path</name>
+    <value>/etc/security/keystores/hs2keystore.jks</value>
+    <description>SSL certificate keystore location</description>
+  </property>
+  <property>
+    <name>hive.server2.keystore.password</name>
+    <value>password</value>
+    <property-type>PASSWORD</property-type>
+    <description>SSL certificate keystore password</description>
+  </property>
+
+  <property>
+    <name>hive.conf.restricted.list</name>
+    <value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
+    <description>Comma separated list of configuration options which are immutable at runtime</description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.maxentries</name>
+    <value>100000</value>
+    <description>
+      Max number of entries in the vector group by aggregation hashtables.
+      Exceeding this will trigger a flush irrespective of memory pressure conditions.
+    </description>
+  </property>
+
+  <!-- missing from HiveConf -->
+  <property>
+    <name>ambari.hive.db.schema.name</name>
+    <display-name>Database Name</display-name>
+    <value>hive</value>
+    <description>Database name used as the Hive Metastore</description>
+    <value-attributes>
+      <type>database</type>
+      <type>host</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value>false</value>
+    <description>If true, the metastore thrift interface will be secured with SASL.
+     Clients must authenticate with Kerberos.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort. If the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <value>false</value>
+    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
+    of buckets, a sort-merge join can be performed by setting this parameter as true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>1000000000</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+      is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, the join is directly
+      converted to a mapjoin (there is no conditional task). The default is 10MB.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.execution.enabled</name>
+    <value>true</value>
+    <description>
+      This flag should be set to true to enable vectorized mode of query execution.
+      The default value is false.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.execution.reduce.enabled</name>
+    <value>false</value>
+    <description>
+      This flag should be set to true to enable vectorized mode of the reduce-side of query execution.
+      The default value is true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.index.filter</name>
+    <value>true</value>
+    <description>
+      Whether to enable automatic use of indexes
+    </description>
+  </property>
+
+  <property>
+    <name>hive.execution.engine</name>
+    <value>mr</value>
+    <description>
+      Chooses the execution engine. The option is: mr (MapReduce, default)
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.checkinterval</name>
+    <value>1024</value>
+	<!--value>4096</value-->
+    <description>Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed.</description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.flush.percent</name>
+    <value>0.1</value>
+    <description>Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded.</description>
+  </property>
+
+  <property>
+    <name>hive.compute.query.using.stats</name>
+    <value>true</value>
+    <description>
+      When set to true, Hive will answer a few queries like count(1) purely using stats
+      stored in the metastore. For basic stats collection, turn on the config hive.stats.autogather to true.
+      For more advanced stats collection, you need to run analyze table queries.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.limit.pushdown.memory.usage</name>
+    <value>0.04</value>
+    <description>The max memory to be used for hash in RS operator for top K selection.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.timeout</name>
+    <value>300</value>
+    <description>Time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.initiator.on</name>
+    <value>false</value>
+    <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the thrift metastore this should only be set to true for one of them.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.worker.threads</name>
+    <value>0</value>
+    <description>Number of compactor worker threads to run on this metastore instance. Can be different values on different metastore instances.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.delta.num.threshold</name>
+    <value>10</value>
+    <description>Number of delta files that must exist in a directory before the compactor will attempt a minor compaction.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.abortedtxn.threshold</name>
+    <value>1000</value>
+    <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
+  </property>
+
+  <property>
+    <name>datanucleus.cache.level2.type</name>
+    <value>none</value>
+    <description>Determines caching mechanism DataNucleus L2 cache will use. It is strongly recommended to use default value of 'none' as other values may cause consistency errors in Hive.</description>
+  </property>
+
+  <property>
+    <name>hive.index.compact.query.max.size</name>
+    <value>10737418240</value>
+    <description>The maximum number of bytes that a query using the compact index can read.
+      Negative value is equivalent to infinity.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.warehouse.subdir.inherit.perms</name>
+    <value>true</value>
+    <description>Set this to true if table directories should inherit the permissions of the warehouse or database directory instead of being created with permissions derived from dfs umask
+    </description>
+  </property>
+
+  <property>
+    <name>hive.start.cleanup.scratchdir</name>
+    <value>true</value>
+    <description>To clean up the Hive scratchdir when starting the Hive server.</description>
+  </property>
+
+</configuration>
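
hive-site.xml, like the other *-site.xml files in this stack, is plain Hadoop-style property XML, so any of the values above can be read back with the standard library. A minimal sketch (the config path below is an assumed example):

    import xml.etree.ElementTree as ET

    def load_hadoop_site(path):
        """Parse a Hadoop-style *-site.xml into a {name: value} dict."""
        props = {}
        for prop in ET.parse(path).getroot().findall('property'):
            name = prop.findtext('name')
            value = prop.findtext('value') or ''
            if name:
                props[name] = value
        return props

    # Example: check which port HiveServer2's binary Thrift transport uses.
    site = load_hadoop_site('/etc/hive/conf/hive-site.xml')  # assumed path
    print(site.get('hive.server2.thrift.port', '10000'))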

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-env.xml
new file mode 100755
index 0000000..2eec231
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-env.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- webhcat-env.sh -->
+  <property>
+    <name>content</name>
+    <description>webhcat-env.sh content</description>
+    <value>
+# The file containing the running pid
+PID_FILE={{webhcat_pid_file}}
+
+TEMPLETON_LOG_DIR={{templeton_log_dir}}/
+
+
+WEBHCAT_LOG_DIR={{templeton_log_dir}}/
+
+# The console error log
+ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
+
+# The console log
+CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
+
+#TEMPLETON_JAR=templeton_jar_name
+
+#HADOOP_PREFIX=hadoop_prefix
+
+#HCAT_PREFIX=hive_prefix
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+export HADOOP_HOME={{hadoop_home}}
+    </value>
+  </property>
+
+</configuration>
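
The {{...}} placeholders in the content block above are filled in from the stack's params before webhcat-env.sh is written out. A rough sketch of that substitution using jinja2 (an illustrative assumption, not the exact resource_management templating call):

    from jinja2 import Template

    webhcat_env = Template(
        "PID_FILE={{ webhcat_pid_file }}\n"
        "TEMPLETON_LOG_DIR={{ templeton_log_dir }}/\n"
        "export HADOOP_HOME={{ hadoop_home }}\n")

    # Hypothetical values standing in for the stack's params:
    print(webhcat_env.render(webhcat_pid_file='/var/run/webhcat/webhcat.pid',
                             templeton_log_dir='/var/log/webhcat',
                             hadoop_home='/usr/iop/current/hadoop-client'))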

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-log4j.xml
new file mode 100755
index 0000000..0ded4d4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-log4j.xml
@@ -0,0 +1,78 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom webhcat-log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Define some default values that can be overridden by system properties
+webhcat.root.logger = INFO, standard
+webhcat.log.dir = .
+webhcat.log.file = webhcat.log
+
+log4j.rootLogger = ${webhcat.root.logger}
+
+# Logging Threshold
+log4j.threshhold = DEBUG
+
+log4j.appender.standard  =  org.apache.log4j.DailyRollingFileAppender
+log4j.appender.standard.File = ${webhcat.log.dir}/${webhcat.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern = .yyyy-MM-dd
+
+log4j.appender.DRFA.layout = org.apache.log4j.PatternLayout
+
+log4j.appender.standard.layout = org.apache.log4j.PatternLayout
+log4j.appender.standard.layout.conversionPattern = %-5p | %d{DATE} | %c | %m%n
+
+# Class logging settings
+log4j.logger.com.sun.jersey = DEBUG
+log4j.logger.com.sun.jersey.spi.container.servlet.WebComponent = ERROR
+log4j.logger.org.apache.hadoop = INFO
+log4j.logger.org.apache.hadoop.conf = WARN
+log4j.logger.org.apache.zookeeper = WARN
+log4j.logger.org.eclipse.jetty = INFO
+
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-site.xml
new file mode 100755
index 0000000..12dfd8a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/configuration/webhcat-site.xml
@@ -0,0 +1,167 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!-- The default settings for Templeton. -->
+<!-- Edit templeton-site.xml to change settings for your local -->
+<!-- install. -->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>templeton.port</name>
+    <value>50111</value>
+    <description>The HTTP port for the main server.</description>
+  </property>
+
+  <property>
+    <name>templeton.hadoop.conf.dir</name>
+    <value>/etc/hadoop/conf</value>
+    <description>The path to the Hadoop configuration.</description>
+  </property>
+
+  <property>
+    <name>templeton.jar</name>
+    <!-- TODO VERIFY WITH STACK-SELECT value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value -->
+    <value>/usr/iop/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar</value>
+    <description>The path to the Templeton jar file.</description>
+  </property>
+
+  <property>
+    <name>templeton.libjars</name>
+    <!-- TODO VERIFY WITH STACK-SELECT value>/usr/lib/zookeeper/zookeeper.jar</value-->
+    <value>/usr/iop/current/zookeeper-client/zookeeper.jar</value>
+    <description>Jars to add to the classpath.</description>
+  </property>
+
+  <property>
+    <name>templeton.hadoop</name>
+    <!-- TODO VERIFY WITH STACK-SELECT value>/usr/bin/hadoop</value-->
+    <value>/usr/iop/current/hadoop-client/bin/hadoop</value>
+    <description>The path to the Hadoop executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.archive</name>
+    <value>hdfs:///iop/apps/${iop.version}/pig/pig.tar.gz</value>
+    <description>The path to the Pig archive in HDFS.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>templeton.pig.path</name>
+    <value>pig.tar.gz/pig/bin/pig</value>
+    <description>The path to the Pig executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hcat</name>
+    <!-- TODO VERIFY WITH STACK-SELECT value>/usr/bin/hcat</value -->
+    <value>/usr/iop/current/hive-client/bin/hcat</value>
+    <description>The path to the hcatalog executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.archive</name>
+    <value>hdfs:///iop/apps/${iop.version}/hive/hive.tar.gz</value>
+    <description>The path to the Hive archive.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>templeton.hive.home</name>
+    <value>hive.tar.gz/hive</value>
+    <description>The path to the Hive home within the tar. Has no effect if templeton.hive.archive is not set.</description>
+  </property>
+
+  <property>
+    <name>templeton.hcat.home</name>
+    <value>hive.tar.gz/hive/hcatalog</value>
+    <description>The path to the HCat home within the tar. Has no effect if templeton.hive.archive is not set.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.path</name>
+    <value>hive.tar.gz/hive/bin/hive</value>
+    <description>The path to the Hive executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.properties</name>
+    <value>hive.metastore.local=false, hive.metastore.uris=thrift://localhost:9933, hive.metastore.sasl.enabled=false</value>
+    <description>Properties to set when running hive.</description>
+  </property>
+
+   <property>
+    <name>templeton.sqoop.archive</name>
+    <value>hdfs:///iop/apps/${iop.version}/sqoop/sqoop.tar.gz</value>
+    <description>The path to the Sqoop archive in HDFS.</description>
+  </property>
+
+  <property>
+    <name>templeton.sqoop.path</name>
+    <value>sqoop.tar.gz/sqoop/bin/sqoop</value>
+    <description>The path to the Sqoop executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.sqoop.home</name>
+    <value>sqoop.tar.gz/sqoop</value>
+    <description>The path to the Sqoop home within the tar. Has no effect if
+      templeton.sqoop.archive is not set.
+    </description>
+  </property>
+
+  <property>
+    <name>templeton.zookeeper.hosts</name>
+    <value>localhost:2181</value>
+    <description>ZooKeeper servers, as comma-separated host:port pairs.</description>
+    <value-attributes>
+      <type>multiLine</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>templeton.storage.class</name>
+    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
+    <description>The class to use as storage</description>
+  </property>
+
+  <property>
+    <name>templeton.override.enabled</name>
+    <value>false</value>
+    <description>Enable the override path in templeton.override.jars</description>
+  </property>
+
+  <property>
+    <name>templeton.streaming.jar</name>
+    <value>hdfs:///iop/apps/${iop.version}/mapreduce/hadoop-streaming.jar</value>
+    <description>The hdfs path to the Hadoop streaming jar file.</description>
+  </property>
+
+  <property>
+    <name>templeton.exec.timeout</name>
+    <value>60000</value>
+    <description>Timeout for the Templeton API.</description>
+  </property>
+
+</configuration>
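
webhcat-site.xml above wires up the Templeton (WebHCat) REST server: templeton.port (50111) is the HTTP listen port, templeton.zookeeper.hosts and templeton.storage.class select ZooKeeper-backed job storage, and the *.archive properties point at HDFS tarballs whose ${iop.version} token is filled in by the stack. A quick liveness check against those settings is a GET on the Templeton v1 status resource; the sketch below does that with the Python standard library, with the hostname as an assumption.

# Sketch: probe the WebHCat (Templeton) status endpoint configured by templeton.port.
# "webhcat-host.example.com" is a placeholder; point it at the real WebHCat server.
import json
import urllib.request

def webhcat_status(host, port=50111, user="ambari-qa"):
    url = "http://{0}:{1}/templeton/v1/status?user.name={2}".format(host, port, user)
    with urllib.request.urlopen(url, timeout=30) as resp:
        return json.loads(resp.read().decode("utf-8"))

# A healthy server answers with roughly {"status": "ok", "version": "v1"}.
# print(webhcat_status("webhcat-host.example.com"))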


[32/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
new file mode 100755
index 0000000..61769f6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
@@ -0,0 +1,1405 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_OLD" (
+    "SD_ID" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000),
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(128)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE  "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+CREATE TABLE  "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+--
+-- PostgreSQL database dump complete
+--
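
The dump above lays out the Hive 0.12.0 metastore schema for PostgreSQL: databases live in "DBS", tables in "TBLS" with a foreign key back to "DBS"."DB_ID", and the single-row "VERSION" table carries the schema version inserted by the final statement. As a small worked example against that layout (connection settings are placeholders and psycopg2 is assumed to be installed), the query below lists each table with its owning database and reads back the recorded schema version; identifiers are quoted because the DDL creates case-sensitive names.

# Sketch: inspect a metastore created from hive-schema-0.12.0.postgres.sql.
# Host, database name, and credentials are illustrative placeholders.
import psycopg2

conn = psycopg2.connect(host="metastore-db.example.com", dbname="hive",
                        user="hiveuser", password="changeme")
with conn, conn.cursor() as cur:
    cur.execute('SELECT d."NAME", t."TBL_NAME", t."TBL_TYPE" '
                'FROM "TBLS" t JOIN "DBS" d ON t."DB_ID" = d."DB_ID" '
                'ORDER BY d."NAME", t."TBL_NAME"')
    for db_name, tbl_name, tbl_type in cur.fetchall():
        print("{0}.{1} ({2})".format(db_name, tbl_name, tbl_type))
    cur.execute('SELECT "SCHEMA_VERSION" FROM "VERSION"')
    print("metastore schema version:", cur.fetchone()[0])
conn.close()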

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.13.0.mysql.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.13.0.mysql.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.13.0.mysql.sql
new file mode 100755
index 0000000..b8f2cfa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/etc/hive-schema-0.13.0.mysql.sql
@@ -0,0 +1,889 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  KEY `TBLS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS`
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNCS
+--
+CREATE TABLE IF NOT EXISTS `FUNCS` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `CREATE_TIME` INT(11) NOT NULL,
+  `DB_ID` BIGINT(20),
+  `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `FUNC_TYPE` INT(11) NOT NULL,
+  `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
+  PRIMARY KEY (`FUNC_ID`),
+  UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
+  KEY `FUNCS_N49` (`DB_ID`),
+  CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNC_RU
+--
+CREATE TABLE IF NOT EXISTS `FUNC_RU` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `RESOURCE_TYPE` INT(11) NOT NULL,
+  `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `INTEGER_IDX` INT(11) NOT NULL,
+  PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
+  CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+-- Transaction and Lock Tables
+-- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint,
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767),
+  FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767),
+  HL_LOCK_STATE char(1) not null,
+  HL_LOCK_TYPE char(1) not null,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
+  KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.13.0', 'Hive release version 0.13.0');
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31


[51/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1863c3b9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1863c3b9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1863c3b9

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 1863c3b90b6bfa45b7ecfef15d2eee08b7539e91
Parents: 7ad307c
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Tue Jun 27 15:41:36 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Tue Jun 27 17:21:03 2017 -0700

----------------------------------------------------------------------
 .../4.0/blueprints/multinode-default.json       |  182 +
 .../4.0/blueprints/singlenode-default.json      |  133 +
 .../4.0/configuration/cluster-env.xml           |  304 +
 .../4.0/hooks/after-INSTALL/scripts/hook.py     |   38 +
 .../4.0/hooks/after-INSTALL/scripts/params.py   |   88 +
 .../scripts/shared_initialization.py            |   89 +
 .../hooks/before-ANY/files/changeToSecureUid.sh |   63 +
 .../4.0/hooks/before-ANY/scripts/hook.py        |   36 +
 .../4.0/hooks/before-ANY/scripts/params.py      |  226 +
 .../before-ANY/scripts/shared_initialization.py |  242 +
 .../4.0/hooks/before-INSTALL/scripts/hook.py    |   37 +
 .../4.0/hooks/before-INSTALL/scripts/params.py  |  111 +
 .../scripts/repo_initialization.py              |   90 +
 .../scripts/shared_initialization.py            |   34 +
 .../4.0/hooks/before-RESTART/scripts/hook.py    |   29 +
 .../hooks/before-START/files/checkForFormat.sh  |   65 +
 .../before-START/files/fast-hdfs-resource.jar   |  Bin 0 -> 28296598 bytes
 .../before-START/files/task-log4j.properties    |  134 +
 .../hooks/before-START/files/topology_script.py |   66 +
 .../4.0/hooks/before-START/scripts/hook.py      |   40 +
 .../4.0/hooks/before-START/scripts/params.py    |  211 +
 .../before-START/scripts/rack_awareness.py      |   71 +
 .../scripts/shared_initialization.py            |  152 +
 .../templates/commons-logging.properties.j2     |   43 +
 .../templates/exclude_hosts_list.j2             |   21 +
 .../templates/hadoop-metrics2.properties.j2     |   88 +
 .../before-START/templates/health_check.j2      |   81 +
 .../templates/include_hosts_list.j2             |   21 +
 .../templates/topology_mappings.data.j2         |   24 +
 .../stacks/BigInsights/4.0/kerberos.json        |   68 +
 .../stacks/BigInsights/4.0/metainfo.xml         |   22 +
 .../4.0/properties/stack_features.json          |  212 +
 .../BigInsights/4.0/properties/stack_tools.json |    4 +
 .../stacks/BigInsights/4.0/repos/repoinfo.xml   |   35 +
 .../BigInsights/4.0/role_command_order.json     |   70 +
 .../4.0/services/AMBARI_METRICS/alerts.json     |  183 +
 .../AMBARI_METRICS/configuration/ams-env.xml    |  107 +
 .../configuration/ams-hbase-env.xml             |  234 +
 .../configuration/ams-hbase-log4j.xml           |  146 +
 .../configuration/ams-hbase-policy.xml          |   53 +
 .../configuration/ams-hbase-security-site.xml   |  149 +
 .../configuration/ams-hbase-site.xml            |  384 +
 .../AMBARI_METRICS/configuration/ams-log4j.xml  |   65 +
 .../AMBARI_METRICS/configuration/ams-site.xml   |  527 +
 .../4.0/services/AMBARI_METRICS/kerberos.json   |  122 +
 .../4.0/services/AMBARI_METRICS/metainfo.xml    |  147 +
 .../4.0/services/AMBARI_METRICS/metrics.json    | 2472 +++++
 .../alerts/alert_ambari_metrics_monitor.py      |  104 +
 .../package/files/hbaseSmokeVerify.sh           |   34 +
 .../files/service-metrics/AMBARI_METRICS.txt    |  245 +
 .../package/files/service-metrics/FLUME.txt     |   17 +
 .../package/files/service-metrics/HBASE.txt     |  588 ++
 .../package/files/service-metrics/HDFS.txt      |  277 +
 .../package/files/service-metrics/HOST.txt      |   37 +
 .../package/files/service-metrics/KAFKA.txt     |  190 +
 .../package/files/service-metrics/STORM.txt     |    7 +
 .../package/files/service-metrics/YARN.txt      |  178 +
 .../AMBARI_METRICS/package/scripts/__init__.py  |   19 +
 .../AMBARI_METRICS/package/scripts/ams.py       |  388 +
 .../package/scripts/ams_service.py              |  103 +
 .../AMBARI_METRICS/package/scripts/functions.py |   51 +
 .../AMBARI_METRICS/package/scripts/hbase.py     |  267 +
 .../package/scripts/hbase_master.py             |   70 +
 .../package/scripts/hbase_regionserver.py       |   66 +
 .../package/scripts/hbase_service.py            |   53 +
 .../package/scripts/metrics_collector.py        |  133 +
 .../package/scripts/metrics_monitor.py          |   58 +
 .../AMBARI_METRICS/package/scripts/params.py    |  254 +
 .../package/scripts/params_linux.py             |   50 +
 .../package/scripts/params_windows.py           |   53 +
 .../package/scripts/service_check.py            |  165 +
 .../package/scripts/service_mapping.py          |   22 +
 .../package/scripts/split_points.py             |  236 +
 .../AMBARI_METRICS/package/scripts/status.py    |   46 +
 .../package/scripts/status_params.py            |   39 +
 .../package/templates/ams.conf.j2               |   35 +
 .../templates/ams_collector_jaas.conf.j2        |   26 +
 .../templates/ams_zookeeper_jaas.conf.j2        |   26 +
 .../hadoop-metrics2-hbase.properties.j2         |   63 +
 .../package/templates/hbase_client_jaas.conf.j2 |   23 +
 .../templates/hbase_grant_permissions.j2        |   39 +
 .../package/templates/hbase_master_jaas.conf.j2 |   26 +
 .../templates/hbase_regionserver_jaas.conf.j2   |   26 +
 .../package/templates/metric_groups.conf.j2     |   37 +
 .../package/templates/metric_monitor.ini.j2     |   31 +
 .../package/templates/regionservers.j2          |   20 +
 .../package/templates/smoketest_metrics.json.j2 |   15 +
 .../BigInsights/4.0/services/FLUME/alerts.json  |   27 +
 .../services/FLUME/configuration/flume-conf.xml |   37 +
 .../services/FLUME/configuration/flume-env.xml  |   90 +
 .../BigInsights/4.0/services/FLUME/metainfo.xml |   69 +
 .../BigInsights/4.0/services/FLUME/metrics.json |  430 +
 .../package/alerts/alert_flume_agent_status.py  |  106 +
 .../4.0/services/FLUME/package/scripts/flume.py |  228 +
 .../FLUME/package/scripts/flume_check.py        |   40 +
 .../FLUME/package/scripts/flume_handler.py      |  145 +
 .../FLUME/package/scripts/flume_upgrade.py      |   94 +
 .../services/FLUME/package/scripts/params.py    |  101 +
 .../FLUME/package/scripts/params_linux.py       |   30 +
 .../templates/flume-metrics2.properties.j2      |   26 +
 .../FLUME/package/templates/flume.conf.j2       |   24 +
 .../FLUME/package/templates/log4j.properties.j2 |   67 +
 .../BigInsights/4.0/services/HBASE/alerts.json  |  157 +
 .../services/HBASE/configuration/hbase-env.xml  |  175 +
 .../configuration/hbase-javaopts-properties.xml |   27 +
 .../HBASE/configuration/hbase-log4j.xml         |  143 +
 .../HBASE/configuration/hbase-policy.xml        |   53 +
 .../services/HBASE/configuration/hbase-site.xml |  681 ++
 .../4.0/services/HBASE/kerberos.json            |  159 +
 .../BigInsights/4.0/services/HBASE/metainfo.xml |  161 +
 .../BigInsights/4.0/services/HBASE/metrics.json | 9410 +++++++++++++++++
 .../HBASE/package/files/draining_servers.rb     |  164 +
 .../HBASE/package/files/hbaseSmokeVerify.sh     |   34 +
 .../services/HBASE/package/scripts/__init__.py  |   19 +
 .../services/HBASE/package/scripts/functions.py |   54 +
 .../4.0/services/HBASE/package/scripts/hbase.py |  232 +
 .../HBASE/package/scripts/hbase_client.py       |   65 +
 .../HBASE/package/scripts/hbase_decommission.py |   74 +
 .../HBASE/package/scripts/hbase_master.py       |  129 +
 .../HBASE/package/scripts/hbase_regionserver.py |  131 +
 .../package/scripts/hbase_restgatewayserver.py  |   84 +
 .../HBASE/package/scripts/hbase_service.py      |   51 +
 .../HBASE/package/scripts/hbase_upgrade.py      |   37 +
 .../services/HBASE/package/scripts/params.py    |  197 +
 .../HBASE/package/scripts/service_check.py      |   78 +
 .../HBASE/package/scripts/status_params.py      |   44 +
 .../services/HBASE/package/scripts/upgrade.py   |   52 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |  109 +
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |  107 +
 .../HBASE/package/templates/hbase-smoke.sh.j2   |   44 +
 .../HBASE/package/templates/hbase.conf.j2       |   35 +
 .../package/templates/hbase_client_jaas.conf.j2 |   23 +
 .../templates/hbase_grant_permissions.j2        |   39 +
 .../package/templates/hbase_master_jaas.conf.j2 |   26 +
 .../templates/hbase_regionserver_jaas.conf.j2   |   26 +
 .../package/templates/hbase_rest_jaas.conf.j2   |   26 +
 .../HBASE/package/templates/regionservers.j2    |   20 +
 .../BigInsights/4.0/services/HBASE/widgets.json |  510 +
 .../BigInsights/4.0/services/HDFS/alerts.json   |  657 ++
 .../services/HDFS/configuration/core-site.xml   |  189 +
 .../services/HDFS/configuration/hadoop-env.xml  |  308 +
 .../HDFS/configuration/hadoop-policy.xml        |  134 +
 .../services/HDFS/configuration/hdfs-log4j.xml  |  201 +
 .../services/HDFS/configuration/hdfs-site.xml   |  606 ++
 .../services/HDFS/configuration/ssl-client.xml  |   58 +
 .../services/HDFS/configuration/ssl-server.xml  |   64 +
 .../BigInsights/4.0/services/HDFS/kerberos.json |  242 +
 .../BigInsights/4.0/services/HDFS/metainfo.xml  |  234 +
 .../BigInsights/4.0/services/HDFS/metrics.json  | 7769 +++++++++++++++
 .../package/alerts/alert_checkpoint_time.py     |  146 +
 .../package/alerts/alert_ha_namenode_health.py  |  176 +
 .../HDFS/package/files/checkForFormat.sh        |   70 +
 .../services/HDFS/package/files/checkWebUI.py   |   56 +
 .../scripts/balancer-emulator/hdfs-command.py   |   45 +
 .../services/HDFS/package/scripts/datanode.py   |  144 +
 .../HDFS/package/scripts/datanode_upgrade.py    |  114 +
 .../4.0/services/HDFS/package/scripts/hdfs.py   |  129 +
 .../HDFS/package/scripts/hdfs_client.py         |  112 +
 .../HDFS/package/scripts/hdfs_datanode.py       |   75 +
 .../HDFS/package/scripts/hdfs_namenode.py       |  483 +
 .../HDFS/package/scripts/hdfs_nfsgateway.py     |   72 +
 .../HDFS/package/scripts/hdfs_rebalance.py      |  130 +
 .../HDFS/package/scripts/hdfs_snamenode.py      |   50 +
 .../HDFS/package/scripts/journalnode.py         |  169 +
 .../HDFS/package/scripts/journalnode_upgrade.py |  136 +
 .../services/HDFS/package/scripts/namenode.py   |  319 +
 .../HDFS/package/scripts/namenode_ha_state.py   |  205 +
 .../HDFS/package/scripts/namenode_upgrade.py    |  262 +
 .../services/HDFS/package/scripts/nfsgateway.py |  138 +
 .../4.0/services/HDFS/package/scripts/params.py |  326 +
 .../HDFS/package/scripts/service_check.py       |  119 +
 .../services/HDFS/package/scripts/snamenode.py  |  142 +
 .../HDFS/package/scripts/status_params.py       |   42 +
 .../4.0/services/HDFS/package/scripts/utils.py  |  357 +
 .../services/HDFS/package/scripts/zkfc_slave.py |  148 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../HDFS/package/templates/hdfs.conf.j2         |   35 +
 .../services/HDFS/package/templates/slaves.j2   |   21 +
 .../BigInsights/4.0/services/HDFS/widgets.json  |  428 +
 .../BigInsights/4.0/services/HIVE/alerts.json   |  111 +
 .../services/HIVE/configuration/hcat-env.xml    |   57 +
 .../services/HIVE/configuration/hive-env.xml    |  195 +
 .../HIVE/configuration/hive-exec-log4j.xml      |  118 +
 .../services/HIVE/configuration/hive-log4j.xml  |  136 +
 .../services/HIVE/configuration/hive-site.xml   | 1111 +++
 .../services/HIVE/configuration/webhcat-env.xml |   54 +
 .../HIVE/configuration/webhcat-log4j.xml        |   78 +
 .../HIVE/configuration/webhcat-site.xml         |  167 +
 .../HIVE/etc/hive-schema-0.12.0.mysql.sql       |  777 ++
 .../HIVE/etc/hive-schema-0.12.0.oracle.sql      |  717 ++
 .../HIVE/etc/hive-schema-0.12.0.postgres.sql    | 1405 +++
 .../HIVE/etc/hive-schema-0.13.0.mysql.sql       |  889 ++
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql      |  834 ++
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql    | 1537 +++
 .../HIVE/etc/hive-schema-0.14.0.mysql.sql       |  889 ++
 .../HIVE/etc/hive-schema-0.14.0.oracle.sql      |  833 ++
 .../HIVE/etc/hive-schema-0.14.0.postgres.sql    | 1541 +++
 .../etc/upgrade-0.12.0-to-0.13.0.oracle.sql     |  165 +
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql |   38 +
 .../BigInsights/4.0/services/HIVE/kerberos.json |  112 +
 .../BigInsights/4.0/services/HIVE/metainfo.xml  |  327 +
 .../HIVE/package/alerts/alert_hive_metastore.py |  184 +
 .../package/alerts/alert_hive_thrift_port.py    |  265 +
 .../HIVE/package/alerts/alert_webhcat_server.py |  242 +
 .../package/etc/hive-schema-0.12.0.mysql.sql    |  777 ++
 .../package/etc/hive-schema-0.12.0.oracle.sql   |  717 ++
 .../package/etc/hive-schema-0.12.0.postgres.sql | 1405 +++
 .../services/HIVE/package/files/addMysqlUser.sh |   37 +
 .../services/HIVE/package/files/hcatSmoke.sh    |   36 +
 .../services/HIVE/package/files/hiveSmoke.sh    |   24 +
 .../services/HIVE/package/files/hiveserver2.sql |   23 +
 .../HIVE/package/files/hiveserver2Smoke.sh      |   32 +
 .../4.0/services/HIVE/package/files/pigSmoke.sh |   18 +
 .../HIVE/package/files/removeMysqlUser.sh       |   33 +
 .../HIVE/package/files/startMetastore.sh        |   25 +
 .../HIVE/package/files/templetonSmoke.sh        |  106 +
 .../services/HIVE/package/scripts/__init__.py   |   19 +
 .../4.0/services/HIVE/package/scripts/hcat.py   |   73 +
 .../HIVE/package/scripts/hcat_client.py         |   50 +
 .../HIVE/package/scripts/hcat_service_check.py  |   78 +
 .../4.0/services/HIVE/package/scripts/hive.py   |  393 +
 .../HIVE/package/scripts/hive_client.py         |   81 +
 .../HIVE/package/scripts/hive_metastore.py      |  199 +
 .../HIVE/package/scripts/hive_server.py         |  166 +
 .../HIVE/package/scripts/hive_server_upgrade.py |  174 +
 .../HIVE/package/scripts/hive_service.py        |  139 +
 .../HIVE/package/scripts/mysql_server.py        |   64 +
 .../HIVE/package/scripts/mysql_service.py       |   52 +
 .../HIVE/package/scripts/mysql_users.py         |   69 +
 .../HIVE/package/scripts/mysql_utils.py         |   34 +
 .../4.0/services/HIVE/package/scripts/params.py |  418 +
 .../HIVE/package/scripts/postgresql_server.py   |  109 +
 .../HIVE/package/scripts/postgresql_service.py  |   39 +
 .../HIVE/package/scripts/service_check.py       |   91 +
 .../HIVE/package/scripts/status_params.py       |   87 +
 .../services/HIVE/package/scripts/webhcat.py    |  117 +
 .../HIVE/package/scripts/webhcat_server.py      |  146 +
 .../HIVE/package/scripts/webhcat_service.py     |   60 +
 .../package/scripts/webhcat_service_check.py    |  117 +
 .../HIVE/package/templates/hive.conf.j2         |   35 +
 .../package/templates/startHiveserver2.sh.j2    |   24 +
 .../package/templates/templeton_smoke.pig.j2    |   24 +
 .../BigInsights/4.0/services/KAFKA/alerts.json  |   32 +
 .../KAFKA/configuration/kafka-broker.xml        |  414 +
 .../services/KAFKA/configuration/kafka-env.xml  |   69 +
 .../KAFKA/configuration/kafka-log4j.xml         |  116 +
 .../4.0/services/KAFKA/kerberos.json            |   49 +
 .../BigInsights/4.0/services/KAFKA/metainfo.xml |   83 +
 .../BigInsights/4.0/services/KAFKA/metrics.json |  264 +
 .../4.0/services/KAFKA/package/scripts/kafka.py |  239 +
 .../KAFKA/package/scripts/kafka_broker.py       |  111 +
 .../KAFKA/package/scripts/kafka_upgrade.py      |   38 +
 .../services/KAFKA/package/scripts/params.py    |  115 +
 .../KAFKA/package/scripts/service_check.py      |   59 +
 .../KAFKA/package/scripts/status_params.py      |   26 +
 .../services/KAFKA/package/scripts/upgrade.py   |   88 +
 .../4.0/services/KAFKA/package/scripts/utils.py |   38 +
 .../KAFKA/package/templates/kafka_jaas.conf.j2  |   41 +
 .../KERBEROS/configuration/kerberos-env.xml     |  308 +
 .../KERBEROS/configuration/krb5-conf.xml        |  109 +
 .../4.0/services/KERBEROS/kerberos.json         |   17 +
 .../4.0/services/KERBEROS/metainfo.xml          |  147 +
 .../KERBEROS/package/scripts/kerberos_client.py |   79 +
 .../KERBEROS/package/scripts/kerberos_common.py |  473 +
 .../KERBEROS/package/scripts/kerberos_server.py |  141 +
 .../services/KERBEROS/package/scripts/params.py |  200 +
 .../KERBEROS/package/scripts/service_check.py   |   81 +
 .../KERBEROS/package/scripts/status_params.py   |   32 +
 .../services/KERBEROS/package/scripts/utils.py  |  105 +
 .../KERBEROS/package/templates/kadm5_acl.j2     |   20 +
 .../KERBEROS/package/templates/kdc_conf.j2      |   30 +
 .../KERBEROS/package/templates/krb5_conf.j2     |   55 +
 .../BigInsights/4.0/services/KNOX/alerts.json   |   32 +
 .../KNOX/configuration/gateway-log4j.xml        |   83 +
 .../KNOX/configuration/gateway-site.xml         |   68 +
 .../services/KNOX/configuration/knox-env.xml    |   64 +
 .../services/KNOX/configuration/ldap-log4j.xml  |   66 +
 .../services/KNOX/configuration/topology.xml    |  157 +
 .../services/KNOX/configuration/users-ldif.xml  |  138 +
 .../BigInsights/4.0/services/KNOX/kerberos.json |   62 +
 .../BigInsights/4.0/services/KNOX/metainfo.xml  |   88 +
 .../KNOX/package/files/validateKnoxStatus.py    |   42 +
 .../4.0/services/KNOX/package/scripts/knox.py   |  134 +
 .../KNOX/package/scripts/knox_gateway.py        |  290 +
 .../services/KNOX/package/scripts/knox_ldap.py  |   54 +
 .../4.0/services/KNOX/package/scripts/ldap.py   |   55 +
 .../4.0/services/KNOX/package/scripts/params.py |  172 +
 .../KNOX/package/scripts/service_check.py       |   92 +
 .../KNOX/package/scripts/status_params.py       |   50 +
 .../services/KNOX/package/scripts/upgrade.py    |   72 +
 .../package/templates/krb5JAASLogin.conf.j2     |   29 +
 .../BigInsights/4.0/services/OOZIE/alerts.json  |   45 +
 .../services/OOZIE/configuration/oozie-env.xml  |  189 +
 .../OOZIE/configuration/oozie-log4j.xml         |  146 +
 .../services/OOZIE/configuration/oozie-site.xml |  382 +
 .../4.0/services/OOZIE/kerberos.json            |   70 +
 .../BigInsights/4.0/services/OOZIE/metainfo.xml |  176 +
 .../package/alerts/alert_check_oozie_server.py  |  211 +
 .../services/OOZIE/package/files/oozieSmoke2.sh |   88 +
 .../files/prepareOozieHdfsDirectories.sh        |   45 +
 .../OOZIE/package/files/wrap_ooziedb.sh         |   31 +
 .../4.0/services/OOZIE/package/scripts/oozie.py |  279 +
 .../OOZIE/package/scripts/oozie_client.py       |   76 +
 .../OOZIE/package/scripts/oozie_server.py       |  193 +
 .../package/scripts/oozie_server_upgrade.py     |  300 +
 .../OOZIE/package/scripts/oozie_service.py      |  124 +
 .../services/OOZIE/package/scripts/params.py    |  259 +
 .../OOZIE/package/scripts/service_check.py      |  140 +
 .../OOZIE/package/scripts/status_params.py      |   47 +
 .../OOZIE/package/templates/adminusers.txt.j2   |   28 +
 .../package/templates/oozie-log4j.properties.j2 |   93 +
 .../4.0/services/PIG/configuration/pig-env.xml  |   38 +
 .../services/PIG/configuration/pig-log4j.xml    |   65 +
 .../PIG/configuration/pig-properties.xml        |  631 ++
 .../BigInsights/4.0/services/PIG/kerberos.json  |   17 +
 .../BigInsights/4.0/services/PIG/metainfo.xml   |   86 +
 .../4.0/services/PIG/package/files/pigSmoke.sh  |   18 +
 .../4.0/services/PIG/package/scripts/params.py  |   25 +
 .../PIG/package/scripts/params_linux.py         |   88 +
 .../4.0/services/PIG/package/scripts/pig.py     |   61 +
 .../services/PIG/package/scripts/pig_client.py  |   59 +
 .../PIG/package/scripts/service_check.py        |  123 +
 .../SLIDER/configuration/slider-client.xml      |   60 +
 .../SLIDER/configuration/slider-env.xml         |   43 +
 .../SLIDER/configuration/slider-log4j.xml       |   89 +
 .../4.0/services/SLIDER/metainfo.xml            |  135 +
 .../SLIDER/package/files/hbaseSmokeVerify.sh    |   34 +
 .../services/SLIDER/package/scripts/__init__.py |   19 +
 .../services/SLIDER/package/scripts/params.py   |   53 +
 .../SLIDER/package/scripts/service_check.py     |   42 +
 .../services/SLIDER/package/scripts/slider.py   |   60 +
 .../SLIDER/package/scripts/slider_client.py     |   62 +
 .../package/templates/storm-slider-env.sh.j2    |   38 +
 .../services/SOLR/configuration/solr-env.xml    |  208 +
 .../services/SOLR/configuration/solr-log4j.xml  |   82 +
 .../services/SOLR/configuration/solr-site.xml   |   44 +
 .../BigInsights/4.0/services/SOLR/kerberos.json |   47 +
 .../BigInsights/4.0/services/SOLR/metainfo.xml  |   74 +
 .../services/SOLR/package/scripts/__init__.py   |   19 +
 .../4.0/services/SOLR/package/scripts/params.py |  182 +
 .../SOLR/package/scripts/service_check.py       |   60 +
 .../4.0/services/SOLR/package/scripts/solr.py   |  143 +
 .../SOLR/package/scripts/solr_client.py         |   36 +
 .../SOLR/package/scripts/solr_server.py         |  118 +
 .../SOLR/package/scripts/solr_service.py        |   59 +
 .../SOLR/package/scripts/solr_upgrade.py        |  135 +
 .../SOLR/package/scripts/status_params.py       |   34 +
 .../services/SOLR/package/templates/solr.xml.j2 |   51 +
 .../SOLR/package/templates/solr_jaas.conf.j2    |   26 +
 .../BigInsights/4.0/services/SPARK/alerts.json  |   32 +
 .../SPARK/configuration/spark-defaults.xml      |  159 +
 .../services/SPARK/configuration/spark-env.xml  |  110 +
 .../configuration/spark-javaopts-properties.xml |   27 +
 .../SPARK/configuration/spark-log4j.xml         |   42 +
 .../configuration/spark-metrics-properties.xml  |  160 +
 .../4.0/services/SPARK/kerberos.json            |   55 +
 .../BigInsights/4.0/services/SPARK/metainfo.xml |  187 +
 .../SPARK/package/scripts/job_history_server.py |  167 +
 .../services/SPARK/package/scripts/params.py    |  199 +
 .../SPARK/package/scripts/service_check.py      |   78 +
 .../4.0/services/SPARK/package/scripts/spark.py |  351 +
 .../SPARK/package/scripts/spark_client.py       |   61 +
 .../SPARK/package/scripts/status_params.py      |   41 +
 .../SPARK/package/scripts/thrift_server.py      |  125 +
 .../package/templates/spark-defaults.conf.j2    |   43 +
 .../services/SQOOP/configuration/sqoop-env.xml  |   59 +
 .../BigInsights/4.0/services/SQOOP/metainfo.xml |   93 +
 .../services/SQOOP/package/scripts/__init__.py  |   19 +
 .../services/SQOOP/package/scripts/params.py    |   95 +
 .../SQOOP/package/scripts/service_check.py      |   44 +
 .../4.0/services/SQOOP/package/scripts/sqoop.py |   84 +
 .../SQOOP/package/scripts/sqoop_client.py       |   57 +
 .../4.0/services/YARN/MAPREDUCE2_metrics.json   | 2596 +++++
 .../4.0/services/YARN/YARN_metrics.json         | 3486 +++++++
 .../4.0/services/YARN/YARN_widgets.json         |  617 ++
 .../BigInsights/4.0/services/YARN/alerts.json   |  398 +
 .../YARN/configuration-mapred/mapred-env.xml    |   82 +
 .../YARN/configuration-mapred/mapred-site.xml   |  479 +
 .../YARN/configuration/capacity-scheduler.xml   |  157 +
 .../services/YARN/configuration/yarn-env.xml    |  243 +
 .../services/YARN/configuration/yarn-log4j.xml  |   71 +
 .../services/YARN/configuration/yarn-site.xml   |  748 ++
 .../BigInsights/4.0/services/YARN/kerberos.json |  208 +
 .../BigInsights/4.0/services/YARN/metainfo.xml  |  264 +
 .../package/alerts/alert_nodemanager_health.py  |  143 +
 .../alerts/alert_nodemanagers_summary.py        |  122 +
 .../files/validateYarnComponentStatus.py        |  170 +
 .../services/YARN/package/scripts/__init__.py   |   20 +
 .../scripts/application_timeline_server.py      |  139 +
 .../YARN/package/scripts/historyserver.py       |  155 +
 .../package/scripts/mapred_service_check.py     |   80 +
 .../YARN/package/scripts/mapreduce2_client.py   |   56 +
 .../YARN/package/scripts/nodemanager.py         |  144 +
 .../YARN/package/scripts/nodemanager_upgrade.py |   74 +
 .../4.0/services/YARN/package/scripts/params.py |  224 +
 .../YARN/package/scripts/resourcemanager.py     |  179 +
 .../services/YARN/package/scripts/service.py    |   76 +
 .../YARN/package/scripts/service_check.py       |   89 +
 .../YARN/package/scripts/status_params.py       |   44 +
 .../4.0/services/YARN/package/scripts/yarn.py   |  277 +
 .../YARN/package/scripts/yarn_client.py         |   56 +
 .../package/templates/container-executor.cfg.j2 |   40 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../YARN/package/templates/mapreduce.conf.j2    |   35 +
 .../package/templates/taskcontroller.cfg.j2     |   38 +
 .../YARN/package/templates/yarn.conf.j2         |   35 +
 .../4.0/services/ZOOKEEPER/alerts.json          |   58 +
 .../ZOOKEEPER/configuration/zoo.cfg.xml         |   84 +
 .../ZOOKEEPER/configuration/zookeeper-env.xml   |   73 +
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml |  101 +
 .../4.0/services/ZOOKEEPER/kerberos.json        |   39 +
 .../4.0/services/ZOOKEEPER/metainfo.xml         |   91 +
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |   96 +
 .../ZOOKEEPER/package/files/zkServer.sh         |  120 +
 .../ZOOKEEPER/package/files/zkService.sh        |   26 +
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |   93 +
 .../ZOOKEEPER/package/scripts/__init__.py       |   20 +
 .../ZOOKEEPER/package/scripts/params.py         |   96 +
 .../ZOOKEEPER/package/scripts/service_check.py  |   53 +
 .../ZOOKEEPER/package/scripts/status_params.py  |   43 +
 .../ZOOKEEPER/package/scripts/zookeeper.py      |  114 +
 .../package/scripts/zookeeper_client.py         |   71 +
 .../package/scripts/zookeeper_server.py         |  161 +
 .../package/scripts/zookeeper_service.py        |   58 +
 .../package/templates/configuration.xsl.j2      |   42 +
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |   53 +
 .../templates/zookeeper_client_jaas.conf.j2     |   23 +
 .../package/templates/zookeeper_jaas.conf.j2    |   26 +
 .../BigInsights/4.0/services/stack_advisor.py   |   24 +
 .../4.0/stack-advisor/stack_advisor_206.py      | 2006 ++++
 .../4.0/stack-advisor/stack_advisor_21.py       |  259 +
 .../4.0/stack-advisor/stack_advisor_22.py       | 1713 ++++
 .../4.0/stack-advisor/stack_advisor_23.py       |  995 ++
 .../4.0/stack-advisor/stack_advisor_24.py       |   29 +
 .../4.0/stack-advisor/stack_advisor_25.py       | 1940 ++++
 .../stacks/BigInsights/4.0/widgets.json         |   95 +
 .../stacks/BigInsights/4.1/kerberos.json        |   47 +
 .../stacks/BigInsights/4.1/metainfo.xml         |   23 +
 .../stacks/BigInsights/4.1/repos/repoinfo.xml   |   44 +
 .../4.1/repos/repoinfo.xml.amd64_RH6            |   32 +
 .../4.1/repos/repoinfo.xml.amd64_RH7            |   32 +
 .../4.1/repos/repoinfo.xml.amd64_SLES           |   32 +
 .../4.1/repos/repoinfo.xml.ppc64le_RH7          |   32 +
 .../4.1/repos/repoinfo.xml.s390x_RH7            |   32 +
 .../BigInsights/4.1/role_command_order.json     |   22 +
 .../4.1/services/AMBARI_METRICS/metainfo.xml    |   27 +
 .../services/FLUME/configuration/flume-env.xml  |   70 +
 .../BigInsights/4.1/services/FLUME/metainfo.xml |   36 +
 .../BigInsights/4.1/services/HBASE/metainfo.xml |   45 +
 .../4.1/services/HBASE/themes/theme.json        |  367 +
 .../services/HDFS/configuration/hadoop-env.xml  |  166 +
 .../services/HDFS/configuration/hdfs-site.xml   |   46 +
 .../BigInsights/4.1/services/HDFS/metainfo.xml  |  127 +
 .../4.1/services/HDFS/themes/theme.json         |  179 +
 .../BigInsights/4.1/services/HDFS/widgets.json  |  644 ++
 .../services/HIVE/configuration/hive-env.xml    |  189 +
 .../services/HIVE/configuration/hive-site.xml   |  338 +
 .../BigInsights/4.1/services/HIVE/metainfo.xml  |  106 +
 .../4.1/services/HIVE/themes/theme.json         |  327 +
 .../BigInsights/4.1/services/KAFKA/metainfo.xml |   27 +
 .../4.1/services/KERBEROS/metainfo.xml          |   26 +
 .../BigInsights/4.1/services/KNOX/metainfo.xml  |   46 +
 .../services/OOZIE/configuration/oozie-site.xml |   64 +
 .../BigInsights/4.1/services/OOZIE/metainfo.xml |  144 +
 .../BigInsights/4.1/services/PIG/metainfo.xml   |   38 +
 .../4.1/services/SLIDER/metainfo.xml            |   46 +
 .../BigInsights/4.1/services/SOLR/metainfo.xml  |   29 +
 .../BigInsights/4.1/services/SPARK/metainfo.xml |   52 +
 .../BigInsights/4.1/services/SQOOP/metainfo.xml |   45 +
 .../4.1/services/YARN/YARN_widgets.json         |  676 ++
 .../YARN/configuration-mapred/mapred-site.xml   |   50 +
 .../services/YARN/configuration/yarn-site.xml   |   43 +
 .../BigInsights/4.1/services/YARN/metainfo.xml  |   82 +
 .../4.1/services/YARN/themes-mapred/theme.json  |  132 +
 .../4.1/services/YARN/themes/theme.json         |  250 +
 .../4.1/services/ZOOKEEPER/metainfo.xml         |   38 +
 .../BigInsights/4.1/services/stack_advisor.py   |   37 +
 .../4.2.5/hooks/after-INSTALL/scripts/hook.py   |   37 +
 .../4.2.5/hooks/after-INSTALL/scripts/params.py |  101 +
 .../scripts/shared_initialization.py            |  108 +
 .../hooks/before-ANY/files/changeToSecureUid.sh |   63 +
 .../4.2.5/hooks/before-ANY/scripts/hook.py      |   36 +
 .../4.2.5/hooks/before-ANY/scripts/params.py    |  241 +
 .../before-ANY/scripts/shared_initialization.py |  253 +
 .../4.2.5/hooks/before-INSTALL/scripts/hook.py  |   37 +
 .../hooks/before-INSTALL/scripts/params.py      |  113 +
 .../scripts/repo_initialization.py              |   97 +
 .../scripts/shared_initialization.py            |   37 +
 .../4.2.5/hooks/before-RESTART/scripts/hook.py  |   29 +
 .../hooks/before-START/files/checkForFormat.sh  |   65 +
 .../before-START/files/fast-hdfs-resource.jar   |  Bin 0 -> 28296598 bytes
 .../before-START/files/task-log4j.properties    |  134 +
 .../hooks/before-START/files/topology_script.py |   66 +
 .../before-START/scripts/custom_extensions.py   |  168 +
 .../4.2.5/hooks/before-START/scripts/hook.py    |   41 +
 .../4.2.5/hooks/before-START/scripts/params.py  |  318 +
 .../before-START/scripts/rack_awareness.py      |   47 +
 .../scripts/shared_initialization.py            |  177 +
 .../templates/commons-logging.properties.j2     |   43 +
 .../templates/exclude_hosts_list.j2             |   21 +
 .../templates/hadoop-metrics2.properties.j2     |  108 +
 .../before-START/templates/health_check.j2      |   81 +
 .../templates/include_hosts_list.j2             |   21 +
 .../templates/topology_mappings.data.j2         |   24 +
 .../stacks/BigInsights/4.2.5/kerberos.json      |   47 +
 .../stacks/BigInsights/4.2.5/metainfo.xml       |   25 +
 .../stacks/BigInsights/4.2.5/repos/repoinfo.xml |   32 +
 .../4.2.5/repos/repoinfo.xml.amd64_RH6          |   32 +
 .../4.2.5/repos/repoinfo.xml.amd64_RH7          |   32 +
 .../4.2.5/repos/repoinfo.xml.amd64_SLES         |   32 +
 .../4.2.5/repos/repoinfo.xml.ppc64le_RH7        |   32 +
 .../BigInsights/4.2.5/role_command_order.json   |   31 +
 .../4.2.5/services/AMBARI_INFRA/metainfo.xml    |   26 +
 .../AMBARI_INFRA/role_command_order.json        |    7 +
 .../4.2.5/services/AMBARI_METRICS/metainfo.xml  |   27 +
 .../4.2.5/services/FLUME/metainfo.xml           |   39 +
 .../services/HBASE/configuration/hbase-env.xml  |  198 +
 .../services/HBASE/configuration/hbase-site.xml |  388 +
 .../HBASE/configuration/ranger-hbase-audit.xml  |  121 +
 .../ranger-hbase-plugin-properties.xml          |   83 +
 .../ranger-hbase-policymgr-ssl.xml              |   66 +
 .../configuration/ranger-hbase-security.xml     |   68 +
 .../4.2.5/services/HBASE/kerberos.json          |  212 +
 .../4.2.5/services/HBASE/metainfo.xml           |   88 +
 .../4.2.5/services/HBASE/metrics.json           | 9370 +++++++++++++++++
 .../services/HBASE/quicklinks/quicklinks.json   |  121 +
 .../4.2.5/services/HBASE/themes/theme.json      |  411 +
 .../4.2.5/services/HBASE/widgets.json           |  510 +
 .../services/HDFS/configuration/core-site.xml   |   53 +
 .../services/HDFS/configuration/hadoop-env.xml  |  212 +
 .../services/HDFS/configuration/hdfs-log4j.xml  |  225 +
 .../services/HDFS/configuration/hdfs-site.xml   |  146 +
 .../HDFS/configuration/ranger-hdfs-audit.xml    |  121 +
 .../ranger-hdfs-plugin-properties.xml           |   78 +
 .../configuration/ranger-hdfs-policymgr-ssl.xml |   66 +
 .../HDFS/configuration/ranger-hdfs-security.xml |   64 +
 .../4.2.5/services/HDFS/kerberos.json           |  247 +
 .../4.2.5/services/HDFS/metainfo.xml            |  138 +
 .../services/HDFS/quicklinks/quicklinks.json    |   92 +
 .../4.2.5/services/HDFS/themes/theme.json       |  179 +
 .../4.2.5/services/HDFS/widgets.json            |  649 ++
 .../HIVE/configuration/beeline-log4j2.xml       |   80 +
 .../hive-atlas-application.properties.xml       |   61 +
 .../services/HIVE/configuration/hive-env.xml    |  235 +
 .../HIVE/configuration/hive-exec-log4j2.xml     |  101 +
 .../HIVE/configuration/hive-interactive-env.xml |  257 +
 .../configuration/hive-interactive-site.xml     |  513 +
 .../services/HIVE/configuration/hive-log4j2.xml |  108 +
 .../services/HIVE/configuration/hive-site.xml   | 1773 ++++
 .../HIVE/configuration/hivemetastore-site.xml   |   43 +
 .../hiveserver2-interactive-site.xml            |   52 +
 .../HIVE/configuration/hiveserver2-site.xml     |  108 +
 .../HIVE/configuration/llap-cli-log4j2.xml      |  109 +
 .../HIVE/configuration/llap-daemon-log4j.xml    |  176 +
 .../HIVE/configuration/ranger-hive-audit.xml    |  175 +
 .../ranger-hive-plugin-properties.xml           |   62 +
 .../configuration/ranger-hive-policymgr-ssl.xml |   66 +
 .../HIVE/configuration/ranger-hive-security.xml |   68 +
 .../HIVE/configuration/webhcat-site.xml         |  128 +
 .../HIVE/etc/hive-schema-0.13.0.mysql.sql       |  889 ++
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql      |  835 ++
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql    | 1538 +++
 .../etc/upgrade-0.12.0-to-0.13.0.oracle.sql     |  165 +
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql |   38 +
 .../4.2.5/services/HIVE/kerberos.json           |  149 +
 .../4.2.5/services/HIVE/metainfo.xml            |  291 +
 .../services/HIVE/quicklinks/quicklinks.json    |   68 +
 .../4.2.5/services/HIVE/themes/theme.json       |  327 +
 .../4.2.5/services/JNBG/metainfo.xml            |   26 +
 .../KAFKA/configuration/ranger-kafka-audit.xml  |   58 +
 .../ranger-kafka-policymgr-ssl.xml              |   34 +
 .../4.2.5/services/KAFKA/kerberos.json          |   70 +
 .../4.2.5/services/KAFKA/metainfo.xml           |   56 +
 .../4.2.5/services/KERBEROS/metainfo.xml        |   27 +
 .../KNOX/configuration/gateway-site.xml         |   28 +
 .../services/KNOX/configuration/knox-env.xml    |   34 +
 .../KNOX/configuration/knoxsso-topology.xml     |  126 +
 .../KNOX/configuration/ranger-knox-audit.xml    |  121 +
 .../ranger-knox-plugin-properties.xml           |  157 +
 .../configuration/ranger-knox-policymgr-ssl.xml |   66 +
 .../KNOX/configuration/ranger-knox-security.xml |   58 +
 .../services/KNOX/configuration/topology.xml    |  214 +
 .../4.2.5/services/KNOX/kerberos.json           |   81 +
 .../4.2.5/services/KNOX/metainfo.xml            |   61 +
 .../4.2.5/services/LOGSEARCH/metainfo.xml       |   26 +
 .../services/LOGSEARCH/role_command_order.json  |    9 +
 .../4.2.5/services/OOZIE/metainfo.xml           |   65 +
 .../PIG/configuration/pig-properties.xml        |  632 ++
 .../BigInsights/4.2.5/services/PIG/metainfo.xml |   40 +
 .../4.2.5/services/R4ML/metainfo.xml            |   37 +
 .../configuration/ranger-tagsync-site.xml       |   46 +
 .../RANGER/configuration/ranger-ugsync-site.xml |   46 +
 .../4.2.5/services/RANGER/metainfo.xml          |   76 +
 .../RANGER_KMS/configuration/dbks-site.xml      |  104 +
 .../RANGER_KMS/configuration/kms-env.xml        |   44 +
 .../configuration/ranger-kms-audit.xml          |   85 +
 .../configuration/ranger-kms-policymgr-ssl.xml  |   34 +
 .../4.2.5/services/RANGER_KMS/kerberos.json     |   84 +
 .../4.2.5/services/RANGER_KMS/metainfo.xml      |   56 +
 .../RANGER_KMS/themes/theme_version_2.json      |  303 +
 .../4.2.5/services/SLIDER/metainfo.xml          |   46 +
 .../4.2.5/services/SOLR/metainfo.xml            |   55 +
 .../SPARK/configuration/spark-defaults.xml      |   32 +
 .../configuration/spark-thrift-sparkconf.xml    |   32 +
 .../4.2.5/services/SPARK/kerberos.json          |   70 +
 .../4.2.5/services/SPARK/metainfo.xml           |   67 +
 .../SPARK2/configuration/spark2-defaults.xml    |  130 +
 .../SPARK2/configuration/spark2-env.xml         |  146 +
 .../configuration/spark2-hive-site-override.xml |   67 +
 .../spark2-javaopts-properties.xml              |   28 +
 .../configuration/spark2-thrift-sparkconf.xml   |  168 +
 .../4.2.5/services/SPARK2/metainfo.xml          |  112 +
 .../sqoop-atlas-application.properties.xml      |   47 +
 .../services/SQOOP/configuration/sqoop-site.xml |   30 +
 .../4.2.5/services/SQOOP/kerberos.json          |   20 +
 .../4.2.5/services/SQOOP/metainfo.xml           |   47 +
 .../4.2.5/services/SYSTEMML/metainfo.xml        |   37 +
 .../4.2.5/services/TITAN/metainfo.xml           |   40 +
 .../4.2.5/services/YARN/YARN_widgets.json       |  670 ++
 .../YARN/configuration-mapred/mapred-env.xml    |   50 +
 .../YARN/configuration-mapred/mapred-site.xml   |  138 +
 .../YARN/configuration/capacity-scheduler.xml   |   70 +
 .../YARN/configuration/ranger-yarn-audit.xml    |  121 +
 .../ranger-yarn-plugin-properties.xml           |   82 +
 .../configuration/ranger-yarn-policymgr-ssl.xml |   66 +
 .../YARN/configuration/ranger-yarn-security.xml |   58 +
 .../services/YARN/configuration/yarn-env.xml    |  198 +
 .../services/YARN/configuration/yarn-log4j.xml  |  103 +
 .../services/YARN/configuration/yarn-site.xml   |  761 ++
 .../4.2.5/services/YARN/kerberos.json           |  278 +
 .../4.2.5/services/YARN/metainfo.xml            |  140 +
 .../YARN/quicklinks-mapred/quicklinks.json      |   92 +
 .../services/YARN/quicklinks/quicklinks.json    |   92 +
 .../services/YARN/themes-mapred/theme.json      |  132 +
 .../4.2.5/services/YARN/themes/theme.json       |  250 +
 .../4.2.5/services/ZOOKEEPER/metainfo.xml       |   37 +
 .../BigInsights/4.2.5/services/stack_advisor.py |  180 +
 .../stacks/BigInsights/4.2/metainfo.xml         |   25 +
 .../stacks/BigInsights/4.2/repos/repoinfo.xml   |   44 +
 .../4.2/repos/repoinfo.xml.amd64_RH6            |   32 +
 .../4.2/repos/repoinfo.xml.amd64_RH7            |   32 +
 .../4.2/repos/repoinfo.xml.amd64_SLES           |   32 +
 .../4.2/repos/repoinfo.xml.ppc64le_RH7          |   32 +
 .../4.2/repos/repoinfo.xml.s390x_RH7            |   32 +
 .../BigInsights/4.2/role_command_order.json     |   31 +
 .../4.2/services/AMBARI_METRICS/alerts.json     |  183 +
 .../AMBARI_METRICS/configuration/ams-env.xml    |  107 +
 .../configuration/ams-hbase-env.xml             |  234 +
 .../configuration/ams-hbase-log4j.xml           |  146 +
 .../configuration/ams-hbase-policy.xml          |   53 +
 .../configuration/ams-hbase-security-site.xml   |  149 +
 .../configuration/ams-hbase-site.xml            |  384 +
 .../AMBARI_METRICS/configuration/ams-log4j.xml  |   65 +
 .../AMBARI_METRICS/configuration/ams-site.xml   |  527 +
 .../4.2/services/AMBARI_METRICS/kerberos.json   |  122 +
 .../4.2/services/AMBARI_METRICS/metainfo.xml    |  147 +
 .../4.2/services/AMBARI_METRICS/metrics.json    | 2472 +++++
 .../alerts/alert_ambari_metrics_monitor.py      |  104 +
 .../package/files/hbaseSmokeVerify.sh           |   34 +
 .../files/service-metrics/AMBARI_METRICS.txt    |  245 +
 .../package/files/service-metrics/FLUME.txt     |   17 +
 .../package/files/service-metrics/HBASE.txt     |  588 ++
 .../package/files/service-metrics/HDFS.txt      |  277 +
 .../package/files/service-metrics/HOST.txt      |   37 +
 .../package/files/service-metrics/KAFKA.txt     |  190 +
 .../package/files/service-metrics/STORM.txt     |    7 +
 .../package/files/service-metrics/YARN.txt      |  178 +
 .../AMBARI_METRICS/package/scripts/__init__.py  |   19 +
 .../AMBARI_METRICS/package/scripts/ams.py       |  388 +
 .../package/scripts/ams_service.py              |  103 +
 .../AMBARI_METRICS/package/scripts/functions.py |   51 +
 .../AMBARI_METRICS/package/scripts/hbase.py     |  267 +
 .../package/scripts/hbase_master.py             |   70 +
 .../package/scripts/hbase_regionserver.py       |   66 +
 .../package/scripts/hbase_service.py            |   53 +
 .../package/scripts/metrics_collector.py        |  133 +
 .../package/scripts/metrics_monitor.py          |   59 +
 .../AMBARI_METRICS/package/scripts/params.py    |  257 +
 .../package/scripts/params_linux.py             |   50 +
 .../package/scripts/params_windows.py           |   53 +
 .../package/scripts/service_check.py            |  166 +
 .../package/scripts/service_mapping.py          |   22 +
 .../package/scripts/split_points.py             |  236 +
 .../AMBARI_METRICS/package/scripts/status.py    |   46 +
 .../package/scripts/status_params.py            |   39 +
 .../package/templates/ams.conf.j2               |   35 +
 .../templates/ams_collector_jaas.conf.j2        |   26 +
 .../templates/ams_zookeeper_jaas.conf.j2        |   26 +
 .../hadoop-metrics2-hbase.properties.j2         |   63 +
 .../package/templates/hbase_client_jaas.conf.j2 |   23 +
 .../templates/hbase_grant_permissions.j2        |   39 +
 .../package/templates/hbase_master_jaas.conf.j2 |   26 +
 .../templates/hbase_regionserver_jaas.conf.j2   |   26 +
 .../package/templates/metric_groups.conf.j2     |   37 +
 .../package/templates/metric_monitor.ini.j2     |   31 +
 .../package/templates/regionservers.j2          |   20 +
 .../package/templates/smoketest_metrics.json.j2 |   15 +
 .../BigInsights/4.2/services/FLUME/alerts.json  |   27 +
 .../services/FLUME/configuration/flume-conf.xml |   37 +
 .../services/FLUME/configuration/flume-env.xml  |   98 +
 .../4.2/services/FLUME/kerberos.json            |   51 +
 .../BigInsights/4.2/services/FLUME/metainfo.xml |   69 +
 .../BigInsights/4.2/services/FLUME/metrics.json |  430 +
 .../package/alerts/alert_flume_agent_status.py  |  106 +
 .../4.2/services/FLUME/package/scripts/flume.py |  229 +
 .../FLUME/package/scripts/flume_check.py        |   40 +
 .../FLUME/package/scripts/flume_handler.py      |  145 +
 .../FLUME/package/scripts/flume_upgrade.py      |   88 +
 .../services/FLUME/package/scripts/params.py    |  101 +
 .../FLUME/package/scripts/params_linux.py       |   30 +
 .../templates/flume-metrics2.properties.j2      |   26 +
 .../FLUME/package/templates/flume.conf.j2       |   24 +
 .../FLUME/package/templates/log4j.properties.j2 |   67 +
 .../BigInsights/4.2/services/HBASE/alerts.json  |  157 +
 .../services/HBASE/configuration/hbase-env.xml  |  196 +
 .../configuration/hbase-javaopts-properties.xml |   28 +
 .../HBASE/configuration/hbase-log4j.xml         |  146 +
 .../HBASE/configuration/hbase-policy.xml        |   53 +
 .../services/HBASE/configuration/hbase-site.xml |  759 ++
 .../HBASE/configuration/ranger-hbase-audit.xml  |  177 +
 .../ranger-hbase-plugin-properties.xml          |  203 +
 .../ranger-hbase-policymgr-ssl.xml              |   61 +
 .../configuration/ranger-hbase-security.xml     |   68 +
 .../4.2/services/HBASE/kerberos.json            |  188 +
 .../BigInsights/4.2/services/HBASE/metainfo.xml |  176 +
 .../BigInsights/4.2/services/HBASE/metrics.json | 9420 ++++++++++++++++++
 .../HBASE/package/files/draining_servers.rb     |  164 +
 .../HBASE/package/files/hbaseSmokeVerify.sh     |   34 +
 .../services/HBASE/package/scripts/__init__.py  |   19 +
 .../services/HBASE/package/scripts/functions.py |   54 +
 .../4.2/services/HBASE/package/scripts/hbase.py |  236 +
 .../HBASE/package/scripts/hbase_client.py       |   72 +
 .../HBASE/package/scripts/hbase_decommission.py |   74 +
 .../HBASE/package/scripts/hbase_master.py       |  131 +
 .../HBASE/package/scripts/hbase_regionserver.py |  132 +
 .../package/scripts/hbase_restgatewayserver.py  |   83 +
 .../HBASE/package/scripts/hbase_service.py      |   51 +
 .../HBASE/package/scripts/hbase_upgrade.py      |   37 +
 .../services/HBASE/package/scripts/params.py    |  363 +
 .../package/scripts/phoenix_queryserver.py      |   77 +
 .../HBASE/package/scripts/phoenix_service.py    |   50 +
 .../HBASE/package/scripts/service_check.py      |   79 +
 .../HBASE/package/scripts/setup_ranger_hbase.py |   84 +
 .../HBASE/package/scripts/status_params.py      |   46 +
 .../services/HBASE/package/scripts/upgrade.py   |   52 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |  109 +
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |  107 +
 .../HBASE/package/templates/hbase-smoke.sh.j2   |   44 +
 .../HBASE/package/templates/hbase.conf.j2       |   35 +
 .../package/templates/hbase_client_jaas.conf.j2 |   23 +
 .../templates/hbase_grant_permissions.j2        |   39 +
 .../package/templates/hbase_master_jaas.conf.j2 |   26 +
 .../templates/hbase_queryserver_jaas.conf.j2    |   26 +
 .../templates/hbase_regionserver_jaas.conf.j2   |   26 +
 .../package/templates/hbase_rest_jaas.conf.j2   |   26 +
 .../HBASE/package/templates/regionservers.j2    |   20 +
 .../services/HBASE/quicklinks/quicklinks.json   |  121 +
 .../BigInsights/4.2/services/HBASE/widgets.json |  510 +
 .../BigInsights/4.2/services/HDFS/alerts.json   |  760 ++
 .../services/HDFS/configuration/core-site.xml   |  233 +
 .../services/HDFS/configuration/hadoop-env.xml  |  394 +
 .../HDFS/configuration/hadoop-policy.xml        |  134 +
 .../services/HDFS/configuration/hdfs-log4j.xml  |  225 +
 .../services/HDFS/configuration/hdfs-site.xml   |  683 ++
 .../HDFS/configuration/ranger-hdfs-audit.xml    |  177 +
 .../ranger-hdfs-plugin-properties.xml           |  213 +
 .../configuration/ranger-hdfs-policymgr-ssl.xml |   61 +
 .../HDFS/configuration/ranger-hdfs-security.xml |   64 +
 .../services/HDFS/configuration/ssl-client.xml  |   58 +
 .../services/HDFS/configuration/ssl-server.xml  |   64 +
 .../BigInsights/4.2/services/HDFS/kerberos.json |  230 +
 .../BigInsights/4.2/services/HDFS/metainfo.xml  |  283 +
 .../BigInsights/4.2/services/HDFS/metrics.json  | 7899 +++++++++++++++
 .../package/alerts/alert_checkpoint_time.py     |  223 +
 .../alerts/alert_datanode_unmounted_data_dir.py |  164 +
 .../package/alerts/alert_ha_namenode_health.py  |  261 +
 .../package/alerts/alert_upgrade_finalized.py   |  171 +
 .../HDFS/package/files/checkForFormat.sh        |   71 +
 .../services/HDFS/package/files/checkWebUI.py   |   54 +
 .../services/HDFS/package/scripts/__init__.py   |   20 +
 .../scripts/balancer-emulator/hdfs-command.py   |   45 +
 .../services/HDFS/package/scripts/datanode.py   |  158 +
 .../HDFS/package/scripts/datanode_upgrade.py    |  141 +
 .../4.2/services/HDFS/package/scripts/hdfs.py   |  131 +
 .../HDFS/package/scripts/hdfs_client.py         |  113 +
 .../HDFS/package/scripts/hdfs_datanode.py       |   76 +
 .../HDFS/package/scripts/hdfs_namenode.py       |  488 +
 .../HDFS/package/scripts/hdfs_nfsgateway.py     |   72 +
 .../HDFS/package/scripts/hdfs_rebalance.py      |  130 +
 .../HDFS/package/scripts/hdfs_snamenode.py      |   53 +
 .../HDFS/package/scripts/install_params.py      |   39 +
 .../HDFS/package/scripts/journalnode.py         |  169 +
 .../HDFS/package/scripts/journalnode_upgrade.py |  140 +
 .../services/HDFS/package/scripts/namenode.py   |  334 +
 .../HDFS/package/scripts/namenode_ha_state.py   |  216 +
 .../HDFS/package/scripts/namenode_upgrade.py    |  269 +
 .../services/HDFS/package/scripts/nfsgateway.py |  137 +
 .../4.2/services/HDFS/package/scripts/params.py |  463 +
 .../HDFS/package/scripts/service_check.py       |  109 +
 .../HDFS/package/scripts/setup_ranger_hdfs.py   |   90 +
 .../services/HDFS/package/scripts/snamenode.py  |  142 +
 .../HDFS/package/scripts/status_params.py       |   44 +
 .../4.2/services/HDFS/package/scripts/utils.py  |  407 +
 .../services/HDFS/package/scripts/zkfc_slave.py |  150 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../HDFS/package/templates/hdfs.conf.j2         |   35 +
 .../services/HDFS/package/templates/slaves.j2   |   21 +
 .../services/HDFS/quicklinks/quicklinks.json    |   92 +
 .../BigInsights/4.2/services/HDFS/widgets.json  |  644 ++
 .../BigInsights/4.2/services/HIVE/alerts.json   |  111 +
 .../services/HIVE/configuration/hcat-env.xml    |   57 +
 .../services/HIVE/configuration/hive-env.xml    |  331 +
 .../HIVE/configuration/hive-exec-log4j.xml      |  121 +
 .../services/HIVE/configuration/hive-log4j.xml  |  139 +
 .../services/HIVE/configuration/hive-site.xml   | 1809 ++++
 .../HIVE/configuration/hiveserver2-site.xml     |   74 +
 .../HIVE/configuration/ranger-hive-audit.xml    |  177 +
 .../ranger-hive-plugin-properties.xml           |  192 +
 .../configuration/ranger-hive-policymgr-ssl.xml |   61 +
 .../HIVE/configuration/ranger-hive-security.xml |   69 +
 .../services/HIVE/configuration/webhcat-env.xml |   54 +
 .../HIVE/configuration/webhcat-log4j.xml        |   81 +
 .../HIVE/configuration/webhcat-site.xml         |  163 +
 .../HIVE/etc/hive-schema-0.12.0.mysql.sql       |  777 ++
 .../HIVE/etc/hive-schema-0.12.0.oracle.sql      |  718 ++
 .../HIVE/etc/hive-schema-0.12.0.postgres.sql    | 1406 +++
 .../HIVE/etc/hive-schema-0.13.0.mysql.sql       |  889 ++
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql      |  835 ++
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql    | 1538 +++
 .../HIVE/etc/hive-schema-0.14.0.mysql.sql       |  889 ++
 .../HIVE/etc/hive-schema-0.14.0.oracle.sql      |  833 ++
 .../HIVE/etc/hive-schema-0.14.0.postgres.sql    | 1541 +++
 .../etc/upgrade-0.12.0-to-0.13.0.oracle.sql     |  165 +
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql |   38 +
 .../BigInsights/4.2/services/HIVE/kerberos.json |  114 +
 .../BigInsights/4.2/services/HIVE/metainfo.xml  |  351 +
 .../HIVE/package/alerts/alert_hive_metastore.py |  193 +
 .../package/alerts/alert_hive_thrift_port.py    |  269 +
 .../HIVE/package/alerts/alert_webhcat_server.py |  228 +
 .../package/etc/hive-schema-0.12.0.mysql.sql    |  777 ++
 .../package/etc/hive-schema-0.12.0.oracle.sql   |  718 ++
 .../package/etc/hive-schema-0.12.0.postgres.sql | 1406 +++
 .../services/HIVE/package/files/addMysqlUser.sh |   37 +
 .../services/HIVE/package/files/hcatSmoke.sh    |   41 +
 .../services/HIVE/package/files/hiveSmoke.sh    |   24 +
 .../services/HIVE/package/files/hiveserver2.sql |   23 +
 .../HIVE/package/files/hiveserver2Smoke.sh      |   32 +
 .../4.2/services/HIVE/package/files/pigSmoke.sh |   18 +
 .../HIVE/package/files/removeMysqlUser.sh       |   33 +
 .../HIVE/package/files/startMetastore.sh        |   25 +
 .../HIVE/package/files/templetonSmoke.sh        |   93 +
 .../services/HIVE/package/scripts/__init__.py   |   19 +
 .../4.2/services/HIVE/package/scripts/hcat.py   |   73 +
 .../HIVE/package/scripts/hcat_client.py         |   50 +
 .../HIVE/package/scripts/hcat_service_check.py  |   78 +
 .../4.2/services/HIVE/package/scripts/hive.py   |  432 +
 .../HIVE/package/scripts/hive_client.py         |   83 +
 .../HIVE/package/scripts/hive_metastore.py      |  234 +
 .../HIVE/package/scripts/hive_server.py         |  177 +
 .../HIVE/package/scripts/hive_server_upgrade.py |  138 +
 .../HIVE/package/scripts/hive_service.py        |  143 +
 .../HIVE/package/scripts/mysql_server.py        |   64 +
 .../HIVE/package/scripts/mysql_service.py       |   57 +
 .../HIVE/package/scripts/mysql_users.py         |   70 +
 .../HIVE/package/scripts/mysql_utils.py         |   35 +
 .../4.2/services/HIVE/package/scripts/params.py |   29 +
 .../HIVE/package/scripts/params_linux.py        |  565 ++
 .../HIVE/package/scripts/params_windows.py      |   76 +
 .../HIVE/package/scripts/postgresql_server.py   |  109 +
 .../HIVE/package/scripts/postgresql_service.py  |   39 +
 .../HIVE/package/scripts/service_check.py       |   91 +
 .../HIVE/package/scripts/setup_ranger_hive.py   |   77 +
 .../HIVE/package/scripts/status_params.py       |   96 +
 .../services/HIVE/package/scripts/webhcat.py    |  111 +
 .../HIVE/package/scripts/webhcat_server.py      |  147 +
 .../HIVE/package/scripts/webhcat_service.py     |   75 +
 .../package/scripts/webhcat_service_check.py    |  120 +
 .../HIVE/package/templates/hive.conf.j2         |   36 +
 .../package/templates/startHiveserver2.sh.j2    |   24 +
 .../package/templates/templeton_smoke.pig.j2    |   24 +
 .../BigInsights/4.2/services/KAFKA/alerts.json  |   32 +
 .../KAFKA/configuration/kafka-broker.xml        |  411 +
 .../services/KAFKA/configuration/kafka-env.xml  |   80 +
 .../KAFKA/configuration/kafka-log4j.xml         |  117 +
 .../4.2/services/KAFKA/kerberos.json            |   50 +
 .../BigInsights/4.2/services/KAFKA/metainfo.xml |   84 +
 .../BigInsights/4.2/services/KAFKA/metrics.json |  239 +
 .../4.2/services/KAFKA/package/scripts/kafka.py |  243 +
 .../KAFKA/package/scripts/kafka_broker.py       |  107 +
 .../services/KAFKA/package/scripts/params.py    |  157 +
 .../KAFKA/package/scripts/service_check.py      |   65 +
 .../KAFKA/package/scripts/status_params.py      |   26 +
 .../services/KAFKA/package/scripts/upgrade.py   |   78 +
 .../4.2/services/KAFKA/package/scripts/utils.py |   38 +
 .../KAFKA/package/templates/kafka.conf.j2       |   35 +
 .../package/templates/kafka_client_jaas.conf.j2 |   29 +
 .../KAFKA/package/templates/kafka_jaas.conf.j2  |   41 +
 .../package/templates/tools-log4j.properties.j2 |   21 +
 .../KERBEROS/configuration/kerberos-env.xml     |  308 +
 .../KERBEROS/configuration/krb5-conf.xml        |  109 +
 .../4.2/services/KERBEROS/kerberos.json         |   17 +
 .../4.2/services/KERBEROS/metainfo.xml          |  147 +
 .../KERBEROS/package/scripts/kerberos_client.py |   79 +
 .../KERBEROS/package/scripts/kerberos_common.py |  473 +
 .../KERBEROS/package/scripts/kerberos_server.py |  141 +
 .../services/KERBEROS/package/scripts/params.py |  200 +
 .../KERBEROS/package/scripts/service_check.py   |   81 +
 .../KERBEROS/package/scripts/status_params.py   |   32 +
 .../services/KERBEROS/package/scripts/utils.py  |  105 +
 .../KERBEROS/package/templates/kadm5_acl.j2     |   20 +
 .../KERBEROS/package/templates/kdc_conf.j2      |   30 +
 .../KERBEROS/package/templates/krb5_conf.j2     |   55 +
 .../BigInsights/4.2/services/KNOX/alerts.json   |   32 +
 .../KNOX/configuration/gateway-log4j.xml        |   83 +
 .../KNOX/configuration/gateway-site.xml         |   72 +
 .../services/KNOX/configuration/knox-env.xml    |   76 +
 .../services/KNOX/configuration/ldap-log4j.xml  |   66 +
 .../KNOX/configuration/ranger-knox-audit.xml    |  177 +
 .../ranger-knox-plugin-properties.xml           |  210 +
 .../configuration/ranger-knox-policymgr-ssl.xml |   61 +
 .../KNOX/configuration/ranger-knox-security.xml |   59 +
 .../services/KNOX/configuration/topology.xml    |  181 +
 .../services/KNOX/configuration/users-ldif.xml  |  138 +
 .../BigInsights/4.2/services/KNOX/kerberos.json |   69 +
 .../BigInsights/4.2/services/KNOX/metainfo.xml  |   92 +
 .../KNOX/package/files/validateKnoxStatus.py    |   43 +
 .../4.2/services/KNOX/package/scripts/knox.py   |  162 +
 .../KNOX/package/scripts/knox_gateway.py        |  307 +
 .../services/KNOX/package/scripts/knox_ldap.py  |   54 +
 .../4.2/services/KNOX/package/scripts/ldap.py   |   55 +
 .../4.2/services/KNOX/package/scripts/params.py |  459 +
 .../KNOX/package/scripts/service_check.py       |   92 +
 .../KNOX/package/scripts/setup_ranger_knox.py   |   77 +
 .../KNOX/package/scripts/status_params.py       |   50 +
 .../services/KNOX/package/scripts/upgrade.py    |   93 +
 .../package/templates/krb5JAASLogin.conf.j2     |   30 +
 .../BigInsights/4.2/services/OOZIE/alerts.json  |   45 +
 .../services/OOZIE/configuration/oozie-env.xml  |  189 +
 .../OOZIE/configuration/oozie-log4j.xml         |  146 +
 .../services/OOZIE/configuration/oozie-site.xml |  382 +
 .../4.2/services/OOZIE/kerberos.json            |   70 +
 .../BigInsights/4.2/services/OOZIE/metainfo.xml |  172 +
 .../package/alerts/alert_check_oozie_server.py  |  211 +
 .../services/OOZIE/package/files/oozieSmoke2.sh |   90 +
 .../files/prepareOozieHdfsDirectories.sh        |   46 +
 .../OOZIE/package/files/wrap_ooziedb.sh         |   31 +
 .../4.2/services/OOZIE/package/scripts/oozie.py |  279 +
 .../OOZIE/package/scripts/oozie_client.py       |   76 +
 .../OOZIE/package/scripts/oozie_server.py       |  193 +
 .../package/scripts/oozie_server_upgrade.py     |  300 +
 .../OOZIE/package/scripts/oozie_service.py      |  124 +
 .../services/OOZIE/package/scripts/params.py    |  262 +
 .../OOZIE/package/scripts/service_check.py      |  140 +
 .../OOZIE/package/scripts/status_params.py      |   47 +
 .../OOZIE/package/templates/adminusers.txt.j2   |   28 +
 .../package/templates/oozie-log4j.properties.j2 |   93 +
 .../services/OOZIE/quicklinks/quicklinks.json   |   48 +
 .../4.2/services/PIG/configuration/pig-env.xml  |   38 +
 .../services/PIG/configuration/pig-log4j.xml    |   65 +
 .../PIG/configuration/pig-properties.xml        |  631 ++
 .../BigInsights/4.2/services/PIG/kerberos.json  |   17 +
 .../BigInsights/4.2/services/PIG/metainfo.xml   |   87 +
 .../4.2/services/PIG/package/files/pigSmoke.sh  |   18 +
 .../4.2/services/PIG/package/scripts/params.py  |   25 +
 .../PIG/package/scripts/params_linux.py         |   88 +
 .../4.2/services/PIG/package/scripts/pig.py     |   61 +
 .../services/PIG/package/scripts/pig_client.py  |   59 +
 .../PIG/package/scripts/service_check.py        |  123 +
 .../BigInsights/4.2/services/RANGER/alerts.json |   74 +
 .../RANGER/configuration/admin-properties.xml   |  262 +
 .../RANGER/configuration/ranger-admin-site.xml  |  517 +
 .../RANGER/configuration/ranger-env.xml         |  438 +
 .../RANGER/configuration/ranger-site.xml        |   69 +
 .../RANGER/configuration/ranger-ugsync-site.xml |  450 +
 .../configuration/usersync-properties.xml       |  109 +
 .../4.2/services/RANGER/metainfo.xml            |  107 +
 .../alerts/alert_ranger_admin_passwd_check.py   |  180 +
 .../services/RANGER/package/scripts/params.py   |  208 +
 .../RANGER/package/scripts/ranger_admin.py      |  194 +
 .../RANGER/package/scripts/ranger_service.py    |   47 +
 .../RANGER/package/scripts/ranger_usersync.py   |   82 +
 .../RANGER/package/scripts/service_check.py     |   51 +
 .../RANGER/package/scripts/setup_ranger.py      |  137 +
 .../RANGER/package/scripts/setup_ranger_xml.py  |  467 +
 .../services/RANGER/package/scripts/upgrade.py  |   30 +
 .../4.2/services/RANGER/themes/theme.json       | 1397 +++
 .../4.2/services/RANGER_KMS/alerts.json         |   32 +
 .../RANGER_KMS/configuration/dbks-site.xml      |   90 +
 .../RANGER_KMS/configuration/kms-env.xml        |   51 +
 .../RANGER_KMS/configuration/kms-log4j.xml      |   68 +
 .../RANGER_KMS/configuration/kms-properties.xml |   93 +
 .../RANGER_KMS/configuration/kms-site.xml       |  129 +
 .../configuration/ranger-kms-audit.xml          |  137 +
 .../configuration/ranger-kms-policymgr-ssl.xml  |   67 +
 .../configuration/ranger-kms-security.xml       |   59 +
 .../configuration/ranger-kms-site.xml           |   59 +
 .../4.2/services/RANGER_KMS/kerberos.json       |   49 +
 .../4.2/services/RANGER_KMS/metainfo.xml        |   89 +
 .../services/RANGER_KMS/package/scripts/kms.py  |  489 +
 .../RANGER_KMS/package/scripts/kms_server.py    |   96 +
 .../RANGER_KMS/package/scripts/kms_service.py   |   49 +
 .../RANGER_KMS/package/scripts/params.py        |  246 +
 .../RANGER_KMS/package/scripts/service_check.py |   41 +
 .../RANGER_KMS/package/scripts/upgrade.py       |   29 +
 .../SLIDER/configuration/slider-client.xml      |   60 +
 .../SLIDER/configuration/slider-env.xml         |   43 +
 .../SLIDER/configuration/slider-log4j.xml       |   92 +
 .../4.2/services/SLIDER/kerberos.json           |   17 +
 .../4.2/services/SLIDER/metainfo.xml            |  128 +
 .../SLIDER/package/files/hbaseSmokeVerify.sh    |   34 +
 .../services/SLIDER/package/scripts/__init__.py |   19 +
 .../services/SLIDER/package/scripts/params.py   |   65 +
 .../SLIDER/package/scripts/params_linux.py      |   75 +
 .../SLIDER/package/scripts/params_windows.py    |   45 +
 .../SLIDER/package/scripts/service_check.py     |   59 +
 .../services/SLIDER/package/scripts/slider.py   |   97 +
 .../SLIDER/package/scripts/slider_client.py     |   71 +
 .../package/templates/storm-slider-env.sh.j2    |   38 +
 .../services/SOLR/configuration/solr-env.xml    |  234 +
 .../services/SOLR/configuration/solr-log4j.xml  |   82 +
 .../services/SOLR/configuration/solr-site.xml   |   44 +
 .../BigInsights/4.2/services/SOLR/kerberos.json |   53 +
 .../BigInsights/4.2/services/SOLR/metainfo.xml  |   82 +
 .../services/SOLR/package/scripts/__init__.py   |   19 +
 .../4.2/services/SOLR/package/scripts/params.py |  205 +
 .../SOLR/package/scripts/service_check.py       |   61 +
 .../4.2/services/SOLR/package/scripts/solr.py   |  100 +
 .../SOLR/package/scripts/solr_client.py         |   36 +
 .../SOLR/package/scripts/solr_server.py         |  107 +
 .../SOLR/package/scripts/solr_service.py        |   72 +
 .../SOLR/package/scripts/solr_upgrade.py        |  135 +
 .../SOLR/package/scripts/status_params.py       |   32 +
 .../services/SOLR/package/templates/solr.xml.j2 |   51 +
 .../SOLR/package/templates/solr_jaas.conf.j2    |   26 +
 .../BigInsights/4.2/services/SPARK/alerts.json  |   32 +
 .../SPARK/configuration/spark-defaults.xml      |  159 +
 .../services/SPARK/configuration/spark-env.xml  |  114 +
 .../configuration/spark-javaopts-properties.xml |   27 +
 .../SPARK/configuration/spark-log4j.xml         |   42 +
 .../configuration/spark-metrics-properties.xml  |  160 +
 .../4.2/services/SPARK/kerberos.json            |   55 +
 .../BigInsights/4.2/services/SPARK/metainfo.xml |  197 +
 .../SPARK/package/scripts/job_history_server.py |  167 +
 .../services/SPARK/package/scripts/params.py    |  216 +
 .../SPARK/package/scripts/service_check.py      |  132 +
 .../4.2/services/SPARK/package/scripts/spark.py |  353 +
 .../SPARK/package/scripts/spark_check.py        |   76 +
 .../SPARK/package/scripts/spark_client.py       |   62 +
 .../SPARK/package/scripts/status_params.py      |   36 +
 .../SPARK/package/scripts/thrift_server.py      |  119 +
 .../package/templates/spark-defaults.conf.j2    |   43 +
 .../services/SPARK/quicklinks/quicklinks.json   |   47 +
 .../services/SQOOP/configuration/sqoop-env.xml  |   59 +
 .../BigInsights/4.2/services/SQOOP/metainfo.xml |   95 +
 .../services/SQOOP/package/scripts/__init__.py  |   19 +
 .../services/SQOOP/package/scripts/params.py    |   95 +
 .../SQOOP/package/scripts/service_check.py      |   44 +
 .../4.2/services/SQOOP/package/scripts/sqoop.py |   85 +
 .../SQOOP/package/scripts/sqoop_client.py       |   57 +
 .../4.2/services/SYSTEMML/metainfo.xml          |   77 +
 .../SYSTEMML/package/scripts/__init__.py        |   19 +
 .../services/SYSTEMML/package/scripts/params.py |   40 +
 .../SYSTEMML/package/scripts/service_check.py   |   43 +
 .../SYSTEMML/package/scripts/systemml_client.py |   49 +
 .../services/TITAN/configuration/titan-env.xml  |   46 +
 .../TITAN/configuration/titan-hbase-solr.xml    |   66 +
 .../TITAN/configuration/titan-log4j.xml         |   65 +
 .../4.2/services/TITAN/kerberos.json            |   17 +
 .../BigInsights/4.2/services/TITAN/metainfo.xml |   88 +
 .../TITAN/package/files/titanSmoke.groovy       |   20 +
 .../services/TITAN/package/scripts/params.py    |  128 +
 .../TITAN/package/scripts/service_check.py      |   64 +
 .../4.2/services/TITAN/package/scripts/titan.py |   70 +
 .../TITAN/package/scripts/titan_client.py       |   58 +
 .../4.2/services/YARN/MAPREDUCE2_metrics.json   | 2596 +++++
 .../4.2/services/YARN/YARN_metrics.json         | 3486 +++++++
 .../4.2/services/YARN/YARN_widgets.json         |  617 ++
 .../BigInsights/4.2/services/YARN/alerts.json   |  414 +
 .../YARN/configuration-mapred/mapred-env.xml    |   96 +
 .../YARN/configuration-mapred/mapred-site.xml   |  536 +
 .../YARN/configuration/capacity-scheduler.xml   |  157 +
 .../YARN/configuration/ranger-yarn-audit.xml    |  177 +
 .../ranger-yarn-plugin-properties.xml           |   80 +
 .../configuration/ranger-yarn-policymgr-ssl.xml |   61 +
 .../YARN/configuration/ranger-yarn-security.xml |   59 +
 .../services/YARN/configuration/yarn-env.xml    |  250 +
 .../services/YARN/configuration/yarn-log4j.xml  |   83 +
 .../services/YARN/configuration/yarn-site.xml   | 1054 ++
 .../BigInsights/4.2/services/YARN/kerberos.json |  224 +
 .../BigInsights/4.2/services/YARN/metainfo.xml  |  286 +
 .../package/alerts/alert_nodemanager_health.py  |  201 +
 .../alerts/alert_nodemanagers_summary.py        |  197 +
 .../files/validateYarnComponentStatus.py        |  170 +
 .../services/YARN/package/scripts/__init__.py   |   20 +
 .../scripts/application_timeline_server.py      |  139 +
 .../YARN/package/scripts/historyserver.py       |  158 +
 .../package/scripts/mapred_service_check.py     |   80 +
 .../YARN/package/scripts/mapreduce2_client.py   |   56 +
 .../YARN/package/scripts/nodemanager.py         |  144 +
 .../YARN/package/scripts/nodemanager_upgrade.py |   76 +
 .../4.2/services/YARN/package/scripts/params.py |  412 +
 .../YARN/package/scripts/resourcemanager.py     |  181 +
 .../services/YARN/package/scripts/service.py    |   81 +
 .../YARN/package/scripts/service_check.py       |   91 +
 .../YARN/package/scripts/setup_ranger_yarn.py   |   67 +
 .../YARN/package/scripts/status_params.py       |   44 +
 .../4.2/services/YARN/package/scripts/yarn.py   |  445 +
 .../YARN/package/scripts/yarn_client.py         |   56 +
 .../package/templates/container-executor.cfg.j2 |   40 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../YARN/package/templates/mapreduce.conf.j2    |   35 +
 .../package/templates/taskcontroller.cfg.j2     |   38 +
 .../YARN/package/templates/yarn.conf.j2         |   35 +
 .../YARN/quicklinks-mapred/quicklinks.json      |   92 +
 .../services/YARN/quicklinks/quicklinks.json    |   92 +
 .../4.2/services/ZOOKEEPER/alerts.json          |   58 +
 .../ZOOKEEPER/configuration/zoo.cfg.xml         |   84 +
 .../ZOOKEEPER/configuration/zookeeper-env.xml   |   73 +
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml |  101 +
 .../4.2/services/ZOOKEEPER/kerberos.json        |   39 +
 .../4.2/services/ZOOKEEPER/metainfo.xml         |   91 +
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |   96 +
 .../ZOOKEEPER/package/files/zkServer.sh         |  120 +
 .../ZOOKEEPER/package/files/zkService.sh        |   26 +
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |   93 +
 .../ZOOKEEPER/package/scripts/__init__.py       |   20 +
 .../ZOOKEEPER/package/scripts/params.py         |   96 +
 .../ZOOKEEPER/package/scripts/service_check.py  |   53 +
 .../ZOOKEEPER/package/scripts/status_params.py  |   43 +
 .../ZOOKEEPER/package/scripts/zookeeper.py      |  114 +
 .../package/scripts/zookeeper_client.py         |   71 +
 .../package/scripts/zookeeper_server.py         |  161 +
 .../package/scripts/zookeeper_service.py        |   58 +
 .../package/templates/configuration.xsl.j2      |   42 +
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |   53 +
 .../templates/zookeeper_client_jaas.conf.j2     |   23 +
 .../package/templates/zookeeper_jaas.conf.j2    |   26 +
 .../BigInsights/4.2/services/stack_advisor.py   |  105 +
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  |   35 +-
 1139 files changed, 232939 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/blueprints/multinode-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/blueprints/multinode-default.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/blueprints/multinode-default.json
new file mode 100755
index 0000000..cc1e217
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/blueprints/multinode-default.json
@@ -0,0 +1,182 @@
+{
+    "configurations" : [
+        {
+            "nagios-env" : {
+                "nagios_contact" : "admin@localhost"
+            }
+        }
+    ],
+    "host_groups" : [
+        {
+            "name" : "master_1",
+            "components" : [
+                {
+                    "name" : "NAMENODE"
+                },
+                {
+                    "name" : "ZOOKEEPER_SERVER"
+                },
+                {
+                    "name" : "HBASE_MASTER"
+                },
+                {
+                    "name" : "GANGLIA_SERVER"
+                },
+                {
+                    "name" : "HDFS_CLIENT"
+                },
+                {
+                    "name" : "YARN_CLIENT"
+                },
+                {
+                    "name" : "HCAT"
+                },
+                {
+                    "name" : "GANGLIA_MONITOR"
+                }
+            ],
+            "cardinality" : "1"
+        },
+        {
+            "name" : "master_2",
+            "components" : [
+
+                {
+                    "name" : "ZOOKEEPER_CLIENT"
+                },
+                {
+                    "name" : "HISTORYSERVER"
+                },
+                {
+                    "name" : "HIVE_SERVER"
+                },
+                {
+                    "name" : "SECONDARY_NAMENODE"
+                },
+                {
+                    "name" : "HIVE_METASTORE"
+                },
+                {
+                    "name" : "HDFS_CLIENT"
+                },
+                {
+                    "name" : "HIVE_CLIENT"
+                },
+                {
+                    "name" : "YARN_CLIENT"
+                },
+                {
+                    "name" : "MYSQL_SERVER"
+                },
+                {
+                    "name" : "POSTGRESQL_SERVER"
+                },
+                {
+                    "name" : "GANGLIA_MONITOR"
+                },
+                {
+                    "name" : "WEBHCAT_SERVER"
+                }
+            ],
+            "cardinality" : "1"
+        },
+        {
+            "name" : "master_3",
+            "components" : [
+                {
+                    "name" : "RESOURCEMANAGER"
+                },
+                {
+                    "name" : "ZOOKEEPER_SERVER"
+                },
+                {
+                    "name" : "GANGLIA_MONITOR"
+                }
+            ],
+            "cardinality" : "1"
+        },
+        {
+            "name" : "master_4",
+            "components" : [
+                {
+                    "name" : "OOZIE_SERVER"
+                },
+                {
+                    "name" : "ZOOKEEPER_SERVER"
+                },
+                {
+                    "name" : "GANGLIA_MONITOR"
+                }
+            ],
+            "cardinality" : "1"
+        },
+        {
+            "name" : "slave",
+            "components" : [
+                {
+                    "name" : "HBASE_REGIONSERVER"
+                },
+                {
+                    "name" : "NODEMANAGER"
+                },
+                {
+                    "name" : "DATANODE"
+                },
+                {
+                    "name" : "GANGLIA_MONITOR"
+                }
+            ],
+            "cardinality" : "${slavesCount}"
+        },
+        {
+            "name" : "gateway",
+            "components" : [
+                {
+                    "name" : "AMBARI_SERVER"
+                },
+                {
+                    "name" : "NAGIOS_SERVER"
+                },
+                {
+                    "name" : "ZOOKEEPER_CLIENT"
+                },
+                {
+                    "name" : "PIG"
+                },
+                {
+                    "name" : "OOZIE_CLIENT"
+                },
+                {
+                    "name" : "HBASE_CLIENT"
+                },
+                {
+                    "name" : "HCAT"
+                },
+                {
+                    "name" : "SQOOP"
+                },
+                {
+                    "name" : "HDFS_CLIENT"
+                },
+                {
+                    "name" : "HIVE_CLIENT"
+                },
+                {
+                    "name" : "YARN_CLIENT"
+                },
+                {
+                    "name" : "MAPREDUCE2_CLIENT"
+                },
+                {
+                    "name" : "GANGLIA_MONITOR"
+                }
+            ],
+            "cardinality" : "1"
+        }
+    ],
+    "Blueprints" : {
+        "blueprint_name" : "blueprint-multinode-default",
+        "stack_name" : "BigInsights",
+        "stack_version" : "4.0"
+    }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/blueprints/singlenode-default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/blueprints/singlenode-default.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/blueprints/singlenode-default.json
new file mode 100755
index 0000000..6a51adc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/blueprints/singlenode-default.json
@@ -0,0 +1,133 @@
+{
+    "configurations" : [
+        {
+            "nagios-env" : {
+                "nagios_contact" : "admin@localhost"
+            }
+        }
+    ],
+    "host_groups" : [
+        {
+            "name" : "host_group_1",
+            "components" : [
+                {
+                    "name" : "STORM_REST_API"
+                },
+                {
+                    "name" : "PIG"
+                },
+                {
+                    "name" : "HISTORYSERVER"
+                },
+                {
+                    "name" : "HBASE_REGIONSERVER"
+                },
+                {
+                    "name" : "OOZIE_CLIENT"
+                },
+                {
+                    "name" : "HBASE_CLIENT"
+                },
+                {
+                    "name" : "NAMENODE"
+                },
+                {
+                    "name" : "SUPERVISOR"
+                },
+                {
+                    "name" : "FALCON_SERVER"
+                },
+                {
+                    "name" : "HCAT"
+                },
+                {
+                    "name" : "AMBARI_SERVER"
+                },
+                {
+                    "name" : "APP_TIMELINE_SERVER"
+                },
+                {
+                    "name" : "HDFS_CLIENT"
+                },
+                {
+                    "name" : "HIVE_CLIENT"
+                },
+                {
+                    "name" : "NODEMANAGER"
+                },
+                {
+                    "name" : "DATANODE"
+                },
+                {
+                    "name" : "WEBHCAT_SERVER"
+                },
+                {
+                    "name" : "RESOURCEMANAGER"
+                },
+                {
+                    "name" : "ZOOKEEPER_SERVER"
+                },
+                {
+                    "name" : "ZOOKEEPER_CLIENT"
+                },
+                {
+                    "name" : "STORM_UI_SERVER"
+                },
+                {
+                    "name" : "HBASE_MASTER"
+                },
+                {
+                    "name" : "HIVE_SERVER"
+                },
+                {
+                    "name" : "OOZIE_SERVER"
+                },
+                {
+                    "name" : "FALCON_CLIENT"
+                },
+                {
+                    "name" : "NAGIOS_SERVER"
+                },
+                {
+                    "name" : "SECONDARY_NAMENODE"
+                },
+                {
+                    "name" : "HIVE_METASTORE"
+                },
+                {
+                    "name" : "GANGLIA_SERVER"
+                },
+                {
+                    "name" : "SQOOP"
+                },
+                {
+                    "name" : "YARN_CLIENT"
+                },
+                {
+                    "name" : "MAPREDUCE2_CLIENT"
+                },
+                {
+                    "name" : "MYSQL_SERVER"
+                },
+                {
+                    "name" : "POSTGRESQL_SERVER"
+                },
+                {
+                    "name" : "GANGLIA_MONITOR"
+                },
+                {
+                    "name" : "DRPC_SERVER"
+                },
+                {
+                    "name" : "NIMBUS"
+                }
+            ],
+            "cardinality" : "1"
+        }
+    ],
+    "Blueprints" : {
+        "blueprint_name" : "blueprint-singlenode-default",
+        "stack_name" : "BigInsights",
+        "stack_version" : "4.0"
+    }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
new file mode 100755
index 0000000..93c7948
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/configuration/cluster-env.xml
@@ -0,0 +1,304 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+    <property>
+      <name>recovery_enabled</name>
+      <value>true</value>
+      <description>Whether auto start is enabled for this cluster.</description>
+      <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+      <name>recovery_type</name>
+      <value>AUTO_START</value>
+      <description>Auto start type.</description>
+      <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+      <name>recovery_lifetime_max_count</name>
+      <value>1024</value>
+      <description>Maximum lifetime number of auto-start recovery attempts allowed per host component. This is reset when the agent is restarted.</description>
+      <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+      <name>recovery_max_count</name>
+      <value>6</value>
+      <description>Maximum number of auto-start recovery attempts allowed per host component within a recovery window. This is reset when the agent is restarted.</description>
+      <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+      <name>recovery_window_in_minutes</name>
+      <value>60</value>
+      <description>Auto start recovery window size in minutes.</description>
+      <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+      <name>recovery_retry_interval</name>
+      <value>5</value>
+      <description>Interval between auto-start recovery retries for a host component.</description>
+      <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>security_enabled</name>
+        <value>false</value>
+        <description>Hadoop Security</description>
+    </property>
+    <property>
+        <name>kerberos_domain</name>
+        <value>EXAMPLE.COM</value>
+        <description>Kerberos realm.</description>
+    </property>
+    <property>
+        <name>ignore_groupsusers_create</name>
+        <display-name>Skip group modifications during install</display-name>
+        <value>false</value>
+        <description>Whether to ignore failures during user and group creation</description>
+        <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+        <value-attributes>
+            <overridable>false</overridable>
+            <type>boolean</type>
+        </value-attributes>
+    </property>
+    <property>
+        <name>smokeuser</name>
+        <display-name>Smoke User</display-name>
+        <value>ambari-qa</value>
+        <property-type>USER</property-type>
+        <description>User executing service checks</description>
+    </property>
+    <property>
+        <name>smokeuser_keytab</name>
+        <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
+        <description>Path to smoke test user keytab file</description>
+    </property>
+    <property>
+        <name>user_group</name>
+        <display-name>Hadoop Group</display-name>
+        <value>hadoop</value>
+        <property-type>GROUP</property-type>
+        <description>Hadoop user group.</description>
+    </property>
+
+  <!-- The properties that end in tar_source describe the pattern of where the tar.gz files come from.
+  They will replace {{ iop_stack_version }} with the "#.#.#.#" value followed by -* (which is the build number in HDP 2.2).
+  When copying those tarballs, Ambari will look up the corresponding tar_destination_folder property to know where it
+  should be copied to.
+  All of the destination folders must begin with hdfs://
+  Please note that the spaces inside of {{ ... }} are important.
+
+  IMPORTANT: Any properties included here must also be declared in site_properties.js
+
+  -->
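+  <!-- Illustrative sketch only (assumed version number, not part of the managed properties):
+       if {{ stack_version }} resolved to 4.2.0.0, the hive_tar_source below,
+       /usr/iop/current/hive-client/hive.tar.gz, would be copied by Ambari into
+       hdfs:///iop/apps/4.2.0.0/hive/ as given by hive_tar_destination_folder. -->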
+  <!-- Tez tarball is needed by Hive Server when using the Tez execution engine. -->
+  <!-- <property>
+    <name>tez_tar_source</name>
+    <value>/usr/iop/current/tez-client/lib/tez.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>tez_tar_destination_folder</name>
+    <value>hdfs:///iop/apps/{{ stack_version }}/tez/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>  -->
+
+  <!-- Hive tarball is needed by WebHCat. -->
+  <property>
+    <name>hive_tar_source</name>
+    <value>/usr/iop/current/hive-client/hive.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>hive_tar_destination_folder</name>
+    <value>hdfs:///iop/apps/{{ stack_version }}/hive/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- Pig tarball is needed by WebHCat. -->
+  <property>
+    <name>pig_tar_source</name>
+    <value>/usr/iop/current/pig-client/pig.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>pig_tar_destination_folder</name>
+    <value>hdfs:///iop/apps/{{ stack_version }}/pig/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- Hadoop Streaming jar is needed by WebHCat. -->
+  <property>
+    <name>hadoop-streaming_tar_source</name>
+    <value>/usr/iop/current/hadoop-mapreduce-client/hadoop-streaming.jar</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>hadoop-streaming_tar_destination_folder</name>
+    <value>hdfs:///iop/apps/{{ stack_version }}/mapreduce/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- Sqoop tarball is needed by WebHCat. -->
+  <property>
+    <name>sqoop_tar_source</name>
+    <value>/usr/iop/current/sqoop-client/sqoop.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>sqoop_tar_destination_folder</name>
+    <value>hdfs:///iop/apps/{{ stack_version }}/sqoop/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- MapReduce2 tarball -->
+  <property>
+    <name>mapreduce_tar_source</name>
+    <value>/usr/iop/current/hadoop-client/mapreduce.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>mapreduce_tar_destination_folder</name>
+    <value>hdfs:///iop/apps/{{ stack_version }}/mapreduce/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+  
+  <property>
+    <name>repo_suse_rhel_template</name>
+    <value>[{{repo_id}}]
+name={{repo_id}}
+{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}
+
+path=/
+enabled=1
+gpgcheck=0</value>
+    <description>Template of repositories for rhel and suse.</description>
+  </property>
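+  <!-- A sketch of what the template above might render to for a hypothetical repository
+       with repo_id "IOP-4.2" and base_url "http://repo.example.com/IOP/rhel7/4.x" (no mirror list):
+
+       [IOP-4.2]
+       name=IOP-4.2
+       baseurl=http://repo.example.com/IOP/rhel7/4.x
+
+       path=/
+       enabled=1
+       gpgcheck=0
+  -->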
+  <property>
+    <name>repo_ubuntu_template</name>
+    <value>{{package_type}} {{base_url}} {{components}}</value>
+    <description>Template of repositories for ubuntu.</description>
+  </property>
+  
+  <property>
+    <name>override_uid</name>
+    <display-name>Have Ambari manage UIDs</display-name>
+    <value>true</value>
+    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+    <description>Have Ambari manage UIDs</description>
+    <value-attributes>
+        <overridable>false</overridable>
+        <type>boolean</type>
+    </value-attributes>
+  </property>
+  
+  <property>
+    <name>fetch_nonlocal_groups</name>
+    <value>true</value>
+    <display-name>Ambari fetch nonlocal groups</display-name>
+    <description>Ambari requires fetching all the groups. This can be slow
+        on environments with LDAP enabled. Setting this option to false allows Ambari
+        to skip user/group management for LDAP groups.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>boolean</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  
+  <property>
+    <name>managed_hdfs_resource_property_names</name>
+    <value/>
+    <description>Comma-separated list of property names that contain HDFS resource paths.
+        Resources from this list will be managed even if they are marked as not managed in the stack.</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  
+  <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_tools</name>
+    <value/>
+    <description>Stack specific tools</description>
+    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
+    <value-attributes>
+      <property-file-name>stack_tools.json</property-file-name>
+      <property-file-type>json</property-file-type>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- Define stack_features property in the base stack. DO NOT override this property for each stack version -->
+  <property>
+    <name>stack_features</name>
+    <value/>
+    <description>List of features supported by the stack</description>
+    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
+    <value-attributes>
+      <property-file-name>stack_features.json</property-file-name>
+      <property-file-type>json</property-file-type>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>stack_root</name>
+    <value>/usr/iop</value>
+    <description>Stack root folder</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>alerts_repeat_tolerance</name>
+    <value>1</value>
+    <description>The number of consecutive alerts required to transition an alert from the SOFT to the HARD state.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>ignore_bad_mounts</name>
+    <value>false</value>
+    <description>For properties handled by handle_mounted_dirs, this prevents Ambari from creating any directories.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>create_dirs_on_root</name>
+    <value>true</value>
+    <description>For properties handled by handle_mounted_dirs, this allows Ambari to create non-existent, unknown directories on the / partition.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>one_dir_per_partition</name>
+    <value>true</value>
+    <description>For properties handled by handle_mounted_dirs, this makes Ambari use at most one directory per partition.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  
+</configuration>
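
For reference, a minimal standalone sketch (not part of this commit) of how the
repo_suse_rhel_template above could be rendered. The repo id and base URL below are
assumed sample values, and mirror_list is left unset so the baseurl branch is taken:

    # Hypothetical rendering of repo_suse_rhel_template with sample values.
    from jinja2 import Template

    repo_template = (
        "[{{repo_id}}]\n"
        "name={{repo_id}}\n"
        "{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n"
        "\n"
        "path=/\n"
        "enabled=1\n"
        "gpgcheck=0"
    )

    print(Template(repo_template).render(
        repo_id="IOP-4.2.5",                                 # assumed sample repo id
        base_url="http://repo.example.com/IOP/rhel7/4.2.5",  # assumed sample base URL
        mirror_list=None,
    ))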


[18/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/spark.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/spark.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/spark.py
new file mode 100755
index 0000000..86c738f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/spark.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import fileinput
+import shutil
+import os
+import re
+from resource_management import *
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+
+
+def spark(env, type=None, action=None):
+  import params
+
+  env.set_params(params)
+
+  Directory(params.spark_conf,
+            owner=params.spark_user,
+            create_parents=True,
+            group=params.user_group
+  )
+
+  Directory([params.spark_pid_dir, params.spark_log_dir],
+            owner=params.spark_user,
+            group=params.user_group,
+            mode=0775,
+            create_parents=True
+  )
+  # The HDFS user directory is only created when a caller passes type='server'.
+  if type == 'server':
+    if action == 'start' or action == 'config':
+      params.HdfsResource(params.spark_hdfs_user_dir,
+                         type="directory",
+                         action="create_on_execute",
+                         owner=params.spark_user,
+                         mode=0775
+      )
+      params.HdfsResource(None, action="execute")
+
+  #file_path = params.spark_conf + '/spark-defaults.conf'
+  #create_file(file_path)
+
+  #write_properties_to_file(file_path, params.config['configurations']['spark-defaults'])
+  configFile("spark-defaults.conf", template_name="spark-defaults.conf.j2")
+
+  # create spark-env.sh in conf dir
+  File(os.path.join(params.spark_conf, 'spark-env.sh'),
+       owner=params.spark_user,
+       group=params.user_group,
+       content=InlineTemplate(params.spark_env_sh)
+  )
+
+  # create log4j.properties in conf dir
+  File(os.path.join(params.spark_conf, 'spark-log4j.properties'),
+       owner=params.spark_user,
+       group=params.user_group,
+       content=InlineTemplate(params.spark_log4j)
+  )
+
+  #create metrics.properties in conf dir
+#  File(os.path.join(params.spark_conf, 'metrics.properties'),
+#       owner=params.spark_user,
+#       group=params.spark_group,
+#       content=InlineTemplate(params.spark_metrics_properties)
+#  )
+
+  # create java-opts in etc/spark/conf dir for iop.version
+  File(os.path.join(params.spark_conf, 'java-opts'),
+       owner=params.spark_user,
+       group=params.user_group,
+       content=params.spark_javaopts_properties
+  )
+
+  if params.is_hive_installed:
+    hive_config = get_hive_config()
+    XmlConfig("hive-site.xml",
+              conf_dir=params.spark_conf,
+              configurations=hive_config,
+              owner=params.spark_user,
+              group=params.user_group,
+              mode=0640)
+def get_hive_config():
+  import params
+  # MUST CONVERT BOOLEANS TO LOWERCASE STRINGS
+  hive_conf_dict = dict()
+  hive_conf_dict['hive.metastore.uris'] = params.config['configurations']['hive-site']['hive.metastore.uris']
+  hive_conf_dict['ambari.hive.db.schema.name'] = params.config['configurations']['hive-site']['ambari.hive.db.schema.name']
+  hive_conf_dict['datanucleus.cache.level2.type'] = params.config['configurations']['hive-site']['datanucleus.cache.level2.type']
+  hive_conf_dict['fs.file.impl.disable.cache'] = str(params.config['configurations']['hive-site']['fs.file.impl.disable.cache']).lower()
+  hive_conf_dict['fs.hdfs.impl.disable.cache'] = str(params.config['configurations']['hive-site']['fs.hdfs.impl.disable.cache']).lower()
+  hive_conf_dict['hive.auto.convert.join'] = str(params.config['configurations']['hive-site']['hive.auto.convert.join']).lower()
+  hive_conf_dict['hive.auto.convert.join.noconditionaltask'] = str(params.config['configurations']['hive-site']['hive.auto.convert.join.noconditionaltask']).lower()
+  hive_conf_dict['hive.auto.convert.join.noconditionaltask.size'] = params.config['configurations']['hive-site']['hive.auto.convert.join.noconditionaltask.size']
+  hive_conf_dict['hive.auto.convert.sortmerge.join'] = str(params.config['configurations']['hive-site']['hive.auto.convert.sortmerge.join']).lower()
+  #hive_conf_dict['hive.auto.convert.sortmerge.join.noconditionaltask'] = str(params.config['configurations']['hive-site']['hive.auto.convert.sortmerge.join.noconditionaltask']).lower()
+  hive_conf_dict['hive.auto.convert.sortmerge.join.to.mapjoin'] = str(params.config['configurations']['hive-site']['hive.auto.convert.sortmerge.join.to.mapjoin']).lower()
+  hive_conf_dict['hive.cbo.enable'] = str(params.config['configurations']['hive-site']['hive.cbo.enable']).lower()
+  hive_conf_dict['hive.cli.print.header'] = str(params.config['configurations']['hive-site']['hive.cli.print.header']).lower()
+  hive_conf_dict['hive.cluster.delegation.token.store.class'] = params.config['configurations']['hive-site']['hive.cluster.delegation.token.store.class']
+  hive_conf_dict['hive.cluster.delegation.token.store.zookeeper.connectString'] = params.config['configurations']['hive-site']['hive.cluster.delegation.token.store.zookeeper.connectString']
+  hive_conf_dict['hive.cluster.delegation.token.store.zookeeper.znode'] = params.config['configurations']['hive-site']['hive.cluster.delegation.token.store.zookeeper.znode']
+  hive_conf_dict['hive.compactor.abortedtxn.threshold'] = params.config['configurations']['hive-site']['hive.compactor.abortedtxn.threshold']
+  hive_conf_dict['hive.compactor.check.interval'] = params.config['configurations']['hive-site']['hive.compactor.check.interval']
+  hive_conf_dict['hive.compactor.delta.num.threshold'] = params.config['configurations']['hive-site']['hive.compactor.delta.num.threshold']
+  hive_conf_dict['hive.compactor.delta.pct.threshold'] = params.config['configurations']['hive-site']['hive.compactor.delta.pct.threshold']
+  hive_conf_dict['hive.compactor.initiator.on'] = str(params.config['configurations']['hive-site']['hive.compactor.initiator.on']).lower()
+  hive_conf_dict['hive.compactor.worker.threads'] = params.config['configurations']['hive-site']['hive.compactor.worker.threads']
+  hive_conf_dict['hive.compactor.worker.timeout'] = params.config['configurations']['hive-site']['hive.compactor.worker.timeout']
+  hive_conf_dict['hive.compute.query.using.stats'] = str(params.config['configurations']['hive-site']['hive.compute.query.using.stats']).lower()
+  hive_conf_dict['hive.conf.restricted.list'] = params.config['configurations']['hive-site']['hive.conf.restricted.list']
+  hive_conf_dict['hive.enforce.bucketing'] = str(params.config['configurations']['hive-site']['hive.enforce.bucketing']).lower()
+  hive_conf_dict['hive.enforce.sorting'] = str(params.config['configurations']['hive-site']['hive.enforce.sorting']).lower()
+  hive_conf_dict['hive.enforce.sortmergebucketmapjoin'] = str(params.config['configurations']['hive-site']['hive.enforce.sortmergebucketmapjoin']).lower()
+  hive_conf_dict['hive.exec.compress.intermediate'] = str(params.config['configurations']['hive-site']['hive.exec.compress.intermediate']).lower()
+  hive_conf_dict['hive.exec.compress.output'] = str(params.config['configurations']['hive-site']['hive.exec.compress.output']).lower()
+  hive_conf_dict['hive.exec.dynamic.partition'] = str(params.config['configurations']['hive-site']['hive.exec.dynamic.partition']).lower()
+  hive_conf_dict['hive.exec.dynamic.partition.mode'] = params.config['configurations']['hive-site']['hive.exec.dynamic.partition.mode']
+  hive_conf_dict['hive.exec.max.created.files'] = params.config['configurations']['hive-site']['hive.exec.max.created.files']
+  hive_conf_dict['hive.exec.max.dynamic.partitions'] = params.config['configurations']['hive-site']['hive.exec.max.dynamic.partitions']
+  hive_conf_dict['hive.exec.max.dynamic.partitions.pernode'] = params.config['configurations']['hive-site']['hive.exec.max.dynamic.partitions.pernode']
+  hive_conf_dict['hive.exec.orc.compression.strategy'] = params.config['configurations']['hive-site']['hive.exec.orc.compression.strategy']
+  hive_conf_dict['hive.exec.orc.default.compress'] = params.config['configurations']['hive-site']['hive.exec.orc.default.compress']
+  hive_conf_dict['hive.exec.orc.default.stripe.size'] = params.config['configurations']['hive-site']['hive.exec.orc.default.stripe.size']
+  hive_conf_dict['hive.exec.parallel'] = str(params.config['configurations']['hive-site']['hive.exec.parallel']).lower()
+  hive_conf_dict['hive.exec.parallel.thread.number'] = params.config['configurations']['hive-site']['hive.exec.parallel.thread.number']
+  hive_conf_dict['hive.exec.reducers.bytes.per.reducer'] = params.config['configurations']['hive-site']['hive.exec.reducers.bytes.per.reducer']
+  hive_conf_dict['hive.exec.reducers.max'] = params.config['configurations']['hive-site']['hive.exec.reducers.max']
+  hive_conf_dict['hive.exec.scratchdir'] = params.config['configurations']['hive-site']['hive.exec.scratchdir']
+  hive_conf_dict['hive.exec.submit.local.task.via.child'] = str(params.config['configurations']['hive-site']['hive.exec.submit.local.task.via.child']).lower()
+  hive_conf_dict['hive.exec.submitviachild'] = str(params.config['configurations']['hive-site']['hive.exec.submitviachild']).lower()
+  hive_conf_dict['hive.execution.engine'] = params.config['configurations']['hive-site']['hive.execution.engine']
+  hive_conf_dict['hive.fetch.task.aggr'] = str(params.config['configurations']['hive-site']['hive.fetch.task.aggr']).lower()
+  hive_conf_dict['hive.fetch.task.conversion'] = params.config['configurations']['hive-site']['hive.fetch.task.conversion']
+  hive_conf_dict['hive.fetch.task.conversion.threshold'] = params.config['configurations']['hive-site']['hive.fetch.task.conversion.threshold']
+  hive_conf_dict['hive.heapsize'] = params.config['configurations']['hive-env']['hive.heapsize']
+  hive_conf_dict['hive.limit.optimize.enable'] = str(params.config['configurations']['hive-site']['hive.limit.optimize.enable']).lower()
+  hive_conf_dict['hive.limit.pushdown.memory.usage'] = params.config['configurations']['hive-site']['hive.limit.pushdown.memory.usage']
+  hive_conf_dict['hive.metastore.authorization.storage.checks'] = str(params.config['configurations']['hive-site']['hive.metastore.authorization.storage.checks']).lower()
+  hive_conf_dict['hive.metastore.cache.pinobjtypes'] = params.config['configurations']['hive-site']['hive.metastore.cache.pinobjtypes']
+  # The following two parameters are hardcoded to work around an issue
+  # interpreting the syntax of the property values (units such as 's' for seconds
+  # result in an error).
+  #hive_conf_dict['hive.metastore.client.connect.retry.delay'] = params.config['configurations']['hive-site']['hive.metastore.client.connect.retry.delay']
+  #hive_conf_dict['hive.metastore.client.socket.timeout'] = params.config['configurations']['hive-site']['hive.metastore.client.socket.timeout']
+  hive_conf_dict['hive.metastore.client.connect.retry.delay'] = '5'
+  hive_conf_dict['hive.metastore.client.socket.timeout'] = '1800'
+  hive_conf_dict['hive.metastore.connect.retries'] = params.config['configurations']['hive-site']['hive.metastore.connect.retries']
+  hive_conf_dict['hive.metastore.execute.setugi'] = str(params.config['configurations']['hive-site']['hive.metastore.execute.setugi']).lower()
+  hive_conf_dict['hive.metastore.failure.retries'] = params.config['configurations']['hive-site']['hive.metastore.failure.retries']
+  hive_conf_dict['hive.metastore.kerberos.keytab.file'] = params.config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+  hive_conf_dict['hive.metastore.kerberos.principal'] = params.config['configurations']['hive-site']['hive.metastore.kerberos.principal']
+  hive_conf_dict['hive.metastore.pre.event.listeners'] = params.config['configurations']['hive-site']['hive.metastore.pre.event.listeners']
+  hive_conf_dict['hive.metastore.sasl.enabled'] = str(params.config['configurations']['hive-site']['hive.metastore.sasl.enabled']).lower()
+  hive_conf_dict['hive.metastore.server.max.threads'] = params.config['configurations']['hive-site']['hive.metastore.server.max.threads']
+  hive_conf_dict['hive.metastore.warehouse.dir'] = params.config['configurations']['hive-site']['hive.metastore.warehouse.dir']
+  hive_conf_dict['hive.orc.compute.splits.num.threads'] = params.config['configurations']['hive-site']['hive.orc.compute.splits.num.threads']
+  hive_conf_dict['hive.orc.splits.include.file.footer'] = str(params.config['configurations']['hive-site']['hive.orc.splits.include.file.footer']).lower()
+  hive_conf_dict['hive.security.authenticator.manager'] = params.config['configurations']['hive-site']['hive.security.authenticator.manager']
+  hive_conf_dict['hive.security.metastore.authenticator.manager'] = params.config['configurations']['hive-site']['hive.security.metastore.authenticator.manager']
+  hive_conf_dict['hive.security.metastore.authorization.auth.reads'] = str(params.config['configurations']['hive-site']['hive.security.metastore.authorization.auth.reads']).lower()
+  hive_conf_dict['hive.security.metastore.authorization.manager'] = params.config['configurations']['hive-site']['hive.security.metastore.authorization.manager']
+  hive_conf_dict['hive.server2.allow.user.substitution'] = str(params.config['configurations']['hive-site']['hive.server2.allow.user.substitution']).lower()
+  hive_conf_dict['hive.server2.logging.operation.enabled'] = str(params.config['configurations']['hive-site']['hive.server2.logging.operation.enabled']).lower()
+  hive_conf_dict['hive.server2.logging.operation.log.location'] = params.config['configurations']['hive-site']['hive.server2.logging.operation.log.location']
+  hive_conf_dict['hive.server2.support.dynamic.service.discovery'] = str(params.config['configurations']['hive-site']['hive.server2.support.dynamic.service.discovery']).lower()
+  hive_conf_dict['hive.server2.table.type.mapping'] = params.config['configurations']['hive-site']['hive.server2.table.type.mapping']
+  hive_conf_dict['hive.server2.thrift.http.path'] = params.config['configurations']['hive-site']['hive.server2.thrift.http.path']
+  hive_conf_dict['hive.server2.thrift.max.worker.threads'] = params.config['configurations']['hive-site']['hive.server2.thrift.max.worker.threads']
+  hive_conf_dict['hive.server2.thrift.port'] = params.spark_thriftserver_port
+  hive_conf_dict['hive.server2.thrift.sasl.qop'] = params.config['configurations']['hive-site']['hive.server2.thrift.sasl.qop']
+  hive_conf_dict['hive.server2.transport.mode'] = params.config['configurations']['hive-site']['hive.server2.transport.mode']
+  hive_conf_dict['hive.server2.use.SSL'] = str(params.config['configurations']['hive-site']['hive.server2.use.SSL']).lower()
+  hive_conf_dict['hive.server2.zookeeper.namespace'] = params.config['configurations']['hive-site']['hive.server2.zookeeper.namespace']
+  hive_conf_dict['hive.smbjoin.cache.rows'] = params.config['configurations']['hive-site']['hive.smbjoin.cache.rows']
+  hive_conf_dict['hive.stats.autogather'] = str(params.config['configurations']['hive-site']['hive.stats.autogather']).lower()
+  hive_conf_dict['hive.stats.dbclass'] = params.config['configurations']['hive-site']['hive.stats.dbclass']
+  hive_conf_dict['hive.stats.fetch.column.stats'] = params.config['configurations']['hive-site']['hive.stats.fetch.column.stats']
+  hive_conf_dict['hive.stats.fetch.partition.stats'] = str(params.config['configurations']['hive-site']['hive.stats.fetch.partition.stats']).lower()
+  hive_conf_dict['hive.support.concurrency'] = str(params.config['configurations']['hive-site']['hive.support.concurrency']).lower()
+  hive_conf_dict['hive.txn.manager'] = params.config['configurations']['hive-site']['hive.txn.manager']
+  hive_conf_dict['hive.txn.max.open.batch'] = params.config['configurations']['hive-site']['hive.txn.max.open.batch']
+  hive_conf_dict['hive.txn.timeout'] = params.config['configurations']['hive-site']['hive.txn.timeout']
+  hive_conf_dict['hive.vectorized.execution.enabled'] = str(params.config['configurations']['hive-site']['hive.vectorized.execution.enabled']).lower()
+  hive_conf_dict['hive.vectorized.execution.reduce.enabled'] = str(params.config['configurations']['hive-site']['hive.vectorized.execution.reduce.enabled']).lower()
+  hive_conf_dict['hive.vectorized.groupby.checkinterval'] = params.config['configurations']['hive-site']['hive.vectorized.groupby.checkinterval']
+  hive_conf_dict['hive.vectorized.groupby.flush.percent'] = params.config['configurations']['hive-site']['hive.vectorized.groupby.flush.percent']
+  hive_conf_dict['hive.vectorized.groupby.maxentries'] = params.config['configurations']['hive-site']['hive.vectorized.groupby.maxentries']
+  hive_conf_dict['hive.zookeeper.client.port'] = params.config['configurations']['hive-site']['hive.zookeeper.client.port']
+  hive_conf_dict['hive.zookeeper.namespace'] = params.config['configurations']['hive-site']['hive.zookeeper.namespace']
+  hive_conf_dict['hive.zookeeper.quorum'] = params.config['configurations']['hive-site']['hive.zookeeper.quorum']
+  hive_conf_dict['javax.jdo.option.ConnectionDriverName'] = params.config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
+  hive_conf_dict['javax.jdo.option.ConnectionURL'] = params.config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
+  hive_conf_dict['javax.jdo.option.ConnectionUserName'] = params.config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+
+  # Comment out parameters not found in configurations dictionary
+  # hive_conf_dict['hive.prewarm.enabled'] = params.config['configurations']['hive-site']['hive.prewarm.enabled']
+  # hive_conf_dict['hive.prewarm.numcontainers'] = params.config['configurations']['hive-site']['hive.prewarm.numcontainers']
+  # hive_conf_dict['hive.user.install.directory'] = params.config['configurations']['hive-site']['hive.user.install.directory']
+
+  if params.security_enabled:
+    hive_conf_dict['hive.metastore.sasl.enabled'] =  str(params.config['configurations']['hive-site']['hive.metastore.sasl.enabled']).lower()
+    hive_conf_dict['hive.metastore.kerberos.keytab.file'] = params.config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+    hive_conf_dict['hive.server2.authentication.spnego.principal'] =  params.config['configurations']['hive-site']['hive.server2.authentication.spnego.principal']
+    hive_conf_dict['hive.server2.authentication.spnego.keytab'] = params.config['configurations']['hive-site']['hive.server2.authentication.spnego.keytab']
+    hive_conf_dict['hive.metastore.kerberos.principal'] = params.config['configurations']['hive-site']['hive.metastore.kerberos.principal']
+    hive_conf_dict['hive.server2.authentication.kerberos.principal'] = params.config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
+    hive_conf_dict['hive.server2.authentication.kerberos.keytab'] =  params.config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab']
+    hive_conf_dict['hive.security.authorization.enabled']=  str(params.config['configurations']['hive-site']['hive.security.authorization.enabled']).lower()
+    hive_conf_dict['hive.server2.enable.doAs'] =  str(params.config['configurations']['hive-site']['hive.server2.enable.doAs']).lower()
+
+  # hive.server2.use.SSL was normalized to 'true'/'false' above; compare explicitly,
+  # since any non-empty string (including 'false') would otherwise be truthy.
+  if hive_conf_dict['hive.server2.use.SSL'] == 'true':
+    if params.config['configurations']['hive-site']['hive.server2.keystore.path'] is not None:
+      hive_conf_dict['hive.server2.keystore.path']= str(params.config['configurations']['hive-site']['hive.server2.keystore.path'])
+    if params.config['configurations']['hive-site']['hive.server2.keystore.password'] is not None:
+      hive_conf_dict['hive.server2.keystore.password']= str(params.config['configurations']['hive-site']['hive.server2.keystore.password'])
+
+  # convert remaining numbers to strings
+  for key, value in hive_conf_dict.iteritems():
+    hive_conf_dict[key] = str(value)
+
+  return hive_conf_dict
+
+
+def spark_properties(params):
+  spark_dict = dict()
+
+  all_spark_config  = params.config['configurations']['spark-defaults']
+  #Add all configs unfiltered first to handle Custom case.
+  spark_dict = all_spark_config.copy()
+
+  spark_dict['spark.yarn.executor.memoryOverhead'] = params.spark_yarn_executor_memoryOverhead
+  spark_dict['spark.yarn.driver.memoryOverhead'] = params.spark_yarn_driver_memoryOverhead
+  spark_dict['spark.yarn.applicationMaster.waitTries'] = params.spark_yarn_applicationMaster_waitTries
+  spark_dict['spark.yarn.scheduler.heartbeat.interval-ms'] = params.spark_yarn_scheduler_heartbeat_interval
+  spark_dict['spark.yarn.max_executor.failures'] = params.spark_yarn_max_executor_failures
+  spark_dict['spark.yarn.queue'] = params.spark_yarn_queue
+  spark_dict['spark.yarn.containerLauncherMaxThreads'] = params.spark_yarn_containerLauncherMaxThreads
+  spark_dict['spark.yarn.submit.file.replication'] = params.spark_yarn_submit_file_replication
+  spark_dict['spark.yarn.preserve.staging.files'] = params.spark_yarn_preserve_staging_files
+
+  # Hardcoded parameters to be added to spark-defaults.conf
+  spark_dict['spark.yarn.historyServer.address'] = params.spark_history_server_host + ':' + str(
+    params.spark_history_ui_port)
+  spark_dict['spark.history.ui.port'] = params.spark_history_ui_port
+  spark_dict['spark.eventLog.enabled'] = str(params.spark_eventlog_enabled).lower()
+  spark_dict['spark.eventLog.dir'] = params.spark_eventlog_dir
+  spark_dict['spark.history.fs.logDirectory'] = params.spark_eventlog_dir
+  spark_dict['spark.yarn.jar'] = params.spark_yarn_jar
+
+  spark_dict['spark.driver.extraJavaOptions'] = params.spark_driver_extraJavaOptions
+  spark_dict['spark.yarn.am.extraJavaOptions'] = params.spark_yarn_am_extraJavaOptions
+
+  # convert remaining numbers to strings
+  for key, value in spark_dict.iteritems():
+    spark_dict[key] = str(value)
+
+  return spark_dict
+
+
+def write_properties_to_file(file_path, value):
+  for key in value:
+    modify_config(file_path, key, value[key])
+
+
+def modify_config(filepath, variable, setting):
+  var_found = False
+  already_set = False
+  V = str(variable)
+  S = str(setting)
+
+  if ' ' in S:
+    # NOTE: this is effectively a no-op; the value is written as-is even when it contains spaces.
+    S = '%s' % S
+
+  for line in fileinput.input(filepath, inplace=1):
+    if not line.lstrip(' ').startswith('#') and '=' in line:
+      _infile_var = str(line.split('=')[0].rstrip(' '))
+      _infile_set = str(line.split('=')[1].lstrip(' ').rstrip())
+      if var_found == False and _infile_var.rstrip(' ') == V:
+        var_found = True
+        if _infile_set.lstrip(' ') == S:
+          already_set = True
+        else:
+          line = "%s %s\n" % (V, S)
+
+    sys.stdout.write(line)
+
+  if not var_found:
+    with open(filepath, "a") as f:
+      f.write("%s \t %s\n" % (V, S))
+  elif already_set == True:
+    pass
+  else:
+    pass
+
+  return
+
+
+def create_file(file_path):
+  try:
+    file = open(file_path, 'w')
+    file.close()
+  except IOError:
+    print('Unable to create file: ' + file_path)
+    sys.exit(1)
+
+def configFile(name, template_name=None):
+  import params
+
+  File(format("{spark_conf}/{name}"),
+       content=Template(template_name),
+       owner=params.spark_user,
+       group=params.user_group
+  )
+
+def get_iop_version():
+  try:
+    command = 'iop-select status hadoop-client'
+    return_code, iop_output = shell.call(command, timeout=20)
+  except Exception, e:
+    Logger.error(str(e))
+    raise Fail('Unable to execute iop-select command to retrieve the version.')
+
+  if return_code != 0:
+    raise Fail(
+      'Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
+
+  iop_version = re.sub('hadoop-client - ', '', iop_output)
+  match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+', iop_version)
+
+  if match is None:
+    raise Fail('Failed to get extracted version')
+
+  return iop_version
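
A small standalone sketch (not stack code) of the boolean-to-string normalization that
get_hive_config() applies above: Python booleans would otherwise render as "True"/"False",
while the generated hive-site.xml needs lowercase "true"/"false". The property names and
values here are arbitrary examples:

    # Standalone illustration of the str(...).lower() pattern used in get_hive_config().
    sample = {"hive.cbo.enable": True, "hive.exec.parallel.thread.number": 8}
    normalized = dict(
        (k, str(v).lower() if isinstance(v, bool) else str(v))
        for k, v in sample.items()
    )
    print(normalized)
    # {'hive.cbo.enable': 'true', 'hive.exec.parallel.thread.number': '8'}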

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/spark_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/spark_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/spark_client.py
new file mode 100755
index 0000000..2345c63
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/spark_client.py
@@ -0,0 +1,61 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from spark import spark
+
+
+class SparkClient(Script):
+
+  def get_component_name(self):
+    return "spark-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "spark", params.version)
+      stack_select.select("spark-client", params.version)
+      #Execute(format("stack-select set spark-client {version}"))
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    spark(env)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  SparkClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/status_params.py
new file mode 100755
index 0000000..de8c2d4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/status_params.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+spark_user = config['configurations']['spark-env']['spark_user']
+spark_group = config['configurations']['spark-env']['spark_group']
+user_group = config['configurations']['cluster-env']['user_group']
+
+if 'hive-env' in config['configurations']:
+    hive_user = config['configurations']['hive-env']['hive_user']
+else:
+    hive_user = "hive"
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+spark_pid_dir = config['configurations']['spark-env']['spark_pid_dir']
+spark_history_server_pid_file = format("{spark_pid_dir}/spark-{spark_user}-org.apache.spark.deploy.history.HistoryServer-1.pid")
+if security_enabled:
+    spark_thrift_server_pid_file = format("{spark_pid_dir}/spark-{hive_user}-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1.pid")
+else:
+    spark_thrift_server_pid_file = format("{spark_pid_dir}/spark-{spark_user}-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1.pid")
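
As a worked example (values assumed, not stack defaults), with spark_pid_dir set to
/var/run/spark and spark_user set to spark, the history server pid-file pattern above
expands as follows:

    # Hypothetical expansion of the pid-file pattern; both values are assumptions.
    spark_pid_dir = "/var/run/spark"
    spark_user = "spark"
    pid_file = "%s/spark-%s-org.apache.spark.deploy.history.HistoryServer-1.pid" % (spark_pid_dir, spark_user)
    print(pid_file)
    # /var/run/spark/spark-spark-org.apache.spark.deploy.history.HistoryServer-1.pid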

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/thrift_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/thrift_server.py
new file mode 100755
index 0000000..39e15d3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/scripts/thrift_server.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import socket
+import os
+from resource_management import *
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from resource_management.libraries.functions import Direction
+from spark import *
+
+
+class ThriftServer(Script):
+
+  def get_component_name(self):
+    return "spark-thriftserver"
+
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "spark", params.version)
+      stack_select.select("spark-thriftserver", params.version)
+
+  def install(self, env):
+    self.install_packages(env)
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    daemon_cmd = format('{spark_thrift_server_stop}')
+    if params.security_enabled:
+      Execute(daemon_cmd,
+              user=params.hive_user,
+              environment={'JAVA_HOME': params.java_home}
+              )
+    else:
+      Execute(daemon_cmd,
+              user=params.spark_user,
+              environment={'JAVA_HOME': params.java_home}
+      )
+    if os.path.isfile(params.spark_thrift_server_pid_file):
+      os.remove(params.spark_thrift_server_pid_file)
+
+
+  def start(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    # TODO this looks wrong, maybe just call spark(env)
+    self.configure(env)
+
+    if params.security_enabled:
+      hive_kerberos_keytab = params.config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+      hive_principal = params.config['configurations']['hive-site']['hive.metastore.kerberos.principal'].replace('_HOST', socket.getfqdn().lower())
+      hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
+      Execute(hive_kinit_cmd, user=params.hive_user)
+
+    # FIXME! TODO! remove this after soft link bug is fixed:
+    #if not os.path.islink('/usr/iop/current/spark'):
+    #  iop_version = get_iop_version()
+    #  cmd = 'ln -s /usr/iop/' + iop_version + '/spark /usr/iop/current/spark'
+    #  Execute(cmd)
+
+    daemon_cmd = format('{spark_thrift_server_start}')
+    no_op_test = format(
+      'ls {spark_thrift_server_pid_file} >/dev/null 2>&1 && ps -p `cat {spark_thrift_server_pid_file}` >/dev/null 2>&1')
+    if (upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE) or params.security_enabled:
+      Execute(daemon_cmd,
+              user=params.hive_user,
+              environment={'JAVA_HOME': params.java_home},
+              not_if=no_op_test
+              )
+    else:
+      Execute(daemon_cmd,
+              user=params.spark_user,
+              environment={'JAVA_HOME': params.java_home},
+              not_if=no_op_test
+      )
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    pid_file = format("{spark_thrift_server_pid_file}")
+    # Verify that the pid file exists and points at a live Thrift Server process
+    check_process_status(pid_file)
+
+  # Note: called from install(), start(), and stop() above to (re)write the Spark configuration
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    spark(env)
+
+if __name__ == "__main__":
+  ThriftServer().execute()
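
The no_op_test guard in start() above skips launching the daemon when the pid file already
points at a live process. A rough standalone Python equivalent of that shell check (the pid
file path in the usage note is an assumed example):

    import os

    def thrift_server_running(pid_file):
        # Mirrors the shell guard: ls <pid_file> && ps -p `cat <pid_file>`
        if not os.path.isfile(pid_file):
            return False
        try:
            pid = int(open(pid_file).read().strip())
            os.kill(pid, 0)  # signal 0 only probes whether the pid exists
            return True
        except (ValueError, OSError):
            return False

    # Example (assumed path):
    # thrift_server_running("/var/run/spark/spark-spark-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1.pid")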

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/templates/spark-defaults.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/templates/spark-defaults.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/templates/spark-defaults.conf.j2
new file mode 100755
index 0000000..83c971d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SPARK/package/templates/spark-defaults.conf.j2
@@ -0,0 +1,43 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+{% for key, value in spark_conf_properties_map.iteritems() -%}
+  {{key}}       {{value}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/configuration/sqoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/configuration/sqoop-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/configuration/sqoop-env.xml
new file mode 100755
index 0000000..eb9da7b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/configuration/sqoop-env.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- sqoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for sqoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+#Set path to where bin/hadoop is available
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+#set the path to where bin/hbase is available
+export HBASE_HOME=${HBASE_HOME:-{{hbase_home}}}
+
+#Set the path to where bin/hive is available
+export HIVE_HOME=${HIVE_HOME:-{{hive_home}}}
+
+#Set the path to where the zookeeper config dir is
+export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
+
+# add libthrift in hive to sqoop class path first so hive imports work
+export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"
+    </value>
+  </property>
+  <property>
+    <name>sqoop_user</name>
+    <description>User to run Sqoop as</description>
+    <property-type>USER</property-type>
+    <value>sqoop</value>
+  </property>
+  <property>
+    <name>jdbc_drivers</name>
+    <description>Comma-separated list of additional JDBC driver class names</description>
+    <value> </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/metainfo.xml
new file mode 100755
index 0000000..f878000
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/metainfo.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <displayName>Sqoop</displayName>
+      <comment>Tool for transferring bulk data between Apache Hadoop and
+        structured data stores such as relational databases
+      </comment>
+      <version>1.4.6</version>
+
+      <components>
+        <component>
+          <name>SQOOP</name>
+          <displayName>Sqoop</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/sqoop_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>sqoop-env.sh</fileName>
+              <dictionaryName>sqoop-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>sqoop</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>sqoop-env</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/params.py
new file mode 100755
index 0000000..eafee9e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/params.py
@@ -0,0 +1,95 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.libraries.functions.default import default
+from resource_management import *
+
+# a map of the Ambari role to the component name
+# for use with /usr/iop/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'SQOOP' : 'sqoop-client'
+}
+
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SQOOP")
+
+config = Script.get_config()
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+#hadoop params
+sqoop_conf_dir = "/usr/iop/current/sqoop-client/conf"
+sqoop_lib = '/usr/iop/current/sqoop-client/lib'
+hadoop_home = '/usr/iop/current/hadoop-client'
+hbase_home = '/usr/iop/current/hbase-client'
+hive_home = '/usr/iop/current/hive-client'
+sqoop_bin_dir = '/usr/iop/current/sqoop-client/bin/'
+
+zoo_conf_dir = "/usr/iop/current/zookeeper-client/conf"
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+user_group = config['configurations']['cluster-env']['user_group']
+sqoop_env_sh_template = config['configurations']['sqoop-env']['content']
+
+sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
+
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path()
+#JDBC driver jar name
+sqoop_jdbc_drivers_dict = {}
+sqoop_jdbc_drivers_name_dict = {}
+if "jdbc_drivers" in config['configurations']['sqoop-env']:
+  sqoop_jdbc_drivers = config['configurations']['sqoop-env']['jdbc_drivers'].split(',')
+
+  for driver_name in sqoop_jdbc_drivers:
+    driver_name = driver_name.strip()
+    if driver_name and not driver_name == '':
+      if driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver":
+        jdbc_jar_name = "sqljdbc4.jar"
+        jdbc_symlink_name = "mssql-jdbc-driver.jar"
+        jdbc_driver_name = "mssql"
+      elif driver_name == "com.mysql.jdbc.Driver":
+        jdbc_jar_name = "mysql-connector-java.jar"
+        jdbc_symlink_name = "mysql-jdbc-driver.jar"
+        jdbc_driver_name = "mysql"
+      elif driver_name == "org.postgresql.Driver":
+        jdbc_jar_name = "postgresql-jdbc.jar"
+        jdbc_symlink_name = "postgres-jdbc-driver.jar"
+        jdbc_driver_name = "postgres"
+      elif driver_name == "oracle.jdbc.driver.OracleDriver":
+        jdbc_jar_name = "ojdbc.jar"
+        jdbc_symlink_name = "oracle-jdbc-driver.jar"
+        jdbc_driver_name = "oracle"
+      elif driver_name == "org.hsqldb.jdbc.JDBCDriver":
+        jdbc_jar_name = "hsqldb.jar"
+        jdbc_symlink_name = "hsqldb-jdbc-driver.jar"
+        jdbc_driver_name = "hsqldb"
+    else:
+      continue
+    sqoop_jdbc_drivers_dict[jdbc_jar_name] = jdbc_symlink_name
+    sqoop_jdbc_drivers_name_dict[jdbc_jar_name] = jdbc_driver_name
+jdk_location = config['hostLevelParams']['jdk_location']
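
As an illustration (not part of the commit), if jdbc_drivers were set to
"com.mysql.jdbc.Driver,org.postgresql.Driver", the loop above would leave the two
dictionaries looking roughly like this:

    # Expected contents for jdbc_drivers = "com.mysql.jdbc.Driver,org.postgresql.Driver"
    sqoop_jdbc_drivers_dict = {
        "mysql-connector-java.jar": "mysql-jdbc-driver.jar",
        "postgresql-jdbc.jar": "postgres-jdbc-driver.jar",
    }
    sqoop_jdbc_drivers_name_dict = {
        "mysql-connector-java.jar": "mysql",
        "postgresql-jdbc.jar": "postgres",
    }
    # jdbc_connector() in sqoop.py then downloads each jar (skipping
    # mysql-connector-java.jar, which is handled via a symlink) from
    # {jdk_location}/{symlink_name} into the Sqoop lib directory.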

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/service_check.py
new file mode 100755
index 0000000..b1f658d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/service_check.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management import *
+
+
+class SqoopServiceCheck(Script):
+
+  def get_component_name(self):
+    return "sqoop-server"
+
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+      Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser_principal}"),
+              user = params.smokeuser,
+      )
+    Execute("sqoop version",
+            user = params.smokeuser,
+            path = params.sqoop_bin_dir,
+            logoutput = True
+    )
+
+if __name__ == "__main__":
+  SqoopServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/sqoop.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/sqoop.py
new file mode 100755
index 0000000..79fe2af
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/sqoop.py
@@ -0,0 +1,84 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.source import InlineTemplate, DownloadSource
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import File, Link, Directory
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+import os
+
+def sqoop(type=None):
+  import params
+  Link(params.sqoop_lib + "/mysql-connector-java.jar",
+       to = '/usr/share/java/mysql-connector-java.jar'
+  )
+
+  jdbc_connector()
+
+  Directory(params.sqoop_conf_dir,
+            owner = params.sqoop_user,
+            group = params.user_group,
+            create_parents = True
+  )
+  File(format("{sqoop_conf_dir}/sqoop-env.sh"),
+    owner=params.sqoop_user,
+    group = params.user_group,
+    content=InlineTemplate(params.sqoop_env_sh_template)
+  )
+  update_config_permissions(["sqoop-env-template.sh",
+                             "sqoop-site-template.xml",
+                             "sqoop-site.xml"])
+  pass
+
+def update_config_permissions(names):
+  import params
+  for filename in names:
+    full_filename = os.path.join(params.sqoop_conf_dir, filename)
+    File(full_filename,
+         owner = params.sqoop_user,
+         group = params.user_group,
+         only_if = format("test -e {full_filename}")
+    )
+
+def jdbc_connector():
+  import params
+  from urllib2 import HTTPError
+  from resource_management import Fail
+  for jar_name in params.sqoop_jdbc_drivers_dict:
+    if 'mysql-connector-java.jar' in jar_name:
+      continue
+    downloaded_custom_connector = format("{sqoop_lib}/{jar_name}")
+    jdbc_symlink_remote = params.sqoop_jdbc_drivers_dict[jar_name]
+    jdbc_driver_label = params.sqoop_jdbc_drivers_name_dict[jar_name]
+    driver_curl_source = format("{jdk_location}/{jdbc_symlink_remote}")
+    environment = {
+      "no_proxy": format("{ambari_server_hostname}")
+    }
+    try:
+      File(downloaded_custom_connector,
+           content = DownloadSource(driver_curl_source),
+           mode = 0644,
+      )
+    except HTTPError:
+      error_string = format("Could not download {driver_curl_source}\n\
+                 Please upload jdbc driver to server by run command:\n\
+                 ambari-server setup --jdbc-db={jdbc_driver_label} --jdbc-driver=<PATH TO DRIVER>\n\
+                 at {ambari_server_hostname}")
+      raise Fail(error_string)
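
To make the download behaviour of jdbc_connector() above concrete, here is a small sketch of the source URLs and target paths it assembles; jdk_location, sqoop_lib and the single-entry dictionary are hypothetical stand-ins for the values that params.py provides on a real cluster.

    # Hypothetical values; in the real scripts these come from params.py / cluster config.
    jdk_location = "http://ambari.example.com:8080/resources"
    sqoop_lib = "/usr/iop/current/sqoop-client/lib"
    sqoop_jdbc_drivers_dict = {"postgresql-jdbc.jar": "postgres-jdbc-driver.jar"}

    for jar_name, symlink in sqoop_jdbc_drivers_dict.items():
        source = "{0}/{1}".format(jdk_location, symlink)   # URL handed to DownloadSource
        target = "{0}/{1}".format(sqoop_lib, jar_name)     # path written by File()
        print(source + " -> " + target)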

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/sqoop_client.py
new file mode 100755
index 0000000..56b7135
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/SQOOP/package/scripts/sqoop_client.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+
+from sqoop import sqoop
+
+
+class SqoopClient(Script):
+
+  def get_component_name(self):
+    return "sqoop-client"
+
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
+      conf_select.select(params.stack_name, "sqoop", params.version)
+      stack_select.select("sqoop-client", params.version)
+      #Execute(format("stack-select set sqoop-client {version}"))
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    sqoop(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  SqoopClient().execute()


http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_webhcat_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_webhcat_server.py
new file mode 100755
index 0000000..15627a5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/alerts/alert_webhcat_server.py
@@ -0,0 +1,242 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+import subprocess
+import socket
+import time
+import urllib2
+
+from resource_management.core.environment import Environment
+from resource_management.core.resources import Execute
+from resource_management.core import shell
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import get_klist_path
+from os import getpid, sep
+
+RESULT_CODE_OK = "OK"
+RESULT_CODE_CRITICAL = "CRITICAL"
+RESULT_CODE_UNKNOWN = "UNKNOWN"
+
+OK_MESSAGE = "WebHCat status was OK ({0:.3f}s response from {1})"
+CRITICAL_CONNECTION_MESSAGE = "Connection failed to {0}"
+CRITICAL_HTTP_MESSAGE = "HTTP {0} response from {1}"
+CRITICAL_WEBHCAT_STATUS_MESSAGE = 'WebHCat returned an unexpected status of "{0}"'
+CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE = "Unable to determine WebHCat health from unexpected JSON response"
+
+TEMPLETON_PORT_KEY = '{{webhcat-site/templeton.port}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+WEBHCAT_PRINCIPAL_KEY = '{{webhcat-site/templeton.kerberos.principal}}'
+WEBHCAT_KEYTAB_KEY = '{{webhcat-site/templeton.kerberos.keytab}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+WEBHCAT_OK_RESPONSE = 'ok'
+WEBHCAT_PORT_DEFAULT = 50111
+
+CONNECTION_TIMEOUT_KEY = 'connection.timeout'
+CONNECTION_TIMEOUT_DEFAULT = 5.0
+CURL_CONNECTION_TIMEOUT_DEFAULT = str(int(CONNECTION_TIMEOUT_DEFAULT))
+
+# default smoke user
+SMOKEUSER_SCRIPT_PARAM_KEY = 'default.smoke.user'
+SMOKEUSER_DEFAULT = 'ambari-qa'
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (TEMPLETON_PORT_KEY, SECURITY_ENABLED_KEY, WEBHCAT_KEYTAB_KEY,
+    WEBHCAT_PRINCIPAL_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, SMOKEUSER_KEY)
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  result_code = RESULT_CODE_UNKNOWN
+
+  if configurations is None:
+    return (result_code, ['There were no configurations supplied to the script.'])
+
+  webhcat_port = WEBHCAT_PORT_DEFAULT
+  if TEMPLETON_PORT_KEY in configurations:
+    webhcat_port = int(configurations[TEMPLETON_PORT_KEY])
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = configurations[SECURITY_ENABLED_KEY].lower() == 'true'
+
+  # parse script arguments
+  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
+  curl_connection_timeout = CURL_CONNECTION_TIMEOUT_DEFAULT
+  if CONNECTION_TIMEOUT_KEY in parameters:
+    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
+    curl_connection_timeout = str(int(connection_timeout))
+
+
+  # the alert will always run on the webhcat host
+  if host_name is None:
+    host_name = socket.getfqdn()
+
+  smokeuser = SMOKEUSER_DEFAULT
+
+  if SMOKEUSER_KEY in configurations:
+    smokeuser = configurations[SMOKEUSER_KEY]
+
+  if SMOKEUSER_SCRIPT_PARAM_KEY in parameters:
+    smokeuser = parameters[SMOKEUSER_SCRIPT_PARAM_KEY]
+
+  # webhcat always uses http, never SSL
+  query_url = "http://{0}:{1}/templeton/v1/status?user.name={2}".format(host_name, webhcat_port, smokeuser)
+
+  # initialize
+  total_time = 0
+  json_response = {}
+
+  if security_enabled:
+    if WEBHCAT_KEYTAB_KEY not in configurations or WEBHCAT_PRINCIPAL_KEY not in configurations:
+      return (RESULT_CODE_UNKNOWN, [str(configurations)])
+
+    try:
+      webhcat_keytab = configurations[WEBHCAT_KEYTAB_KEY]
+      webhcat_principal = configurations[WEBHCAT_PRINCIPAL_KEY]
+
+      # substitute _HOST in kerberos principal with actual fqdn
+      webhcat_principal = webhcat_principal.replace('_HOST', host_name)
+
+      # Create the kerberos credentials cache (ccache) file and set it in the environment to use
+      # when executing curl
+      env = Environment.get_instance()
+      ccache_file = "{0}{1}webhcat_alert_cc_{2}".format(env.tmp_dir, sep, getpid())
+      kerberos_env = {'KRB5CCNAME': ccache_file}
+
+      # Get the configured Kerberos executable search paths, if any
+      if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+      else:
+        kerberos_executable_search_paths = None
+
+      klist_path_local = get_klist_path(kerberos_executable_search_paths)
+      klist_command = format("{klist_path_local} -s {ccache_file}")
+
+      # Determine if we need to kinit by testing to see if the relevant cache exists and has
+      # non-expired tickets.  Tickets are marked to expire after 5 minutes to help reduce the number
+      # of kinits we do but recover quickly when keytabs are regenerated
+      return_code, _ = shell.call(klist_command, user=smokeuser)
+      if return_code != 0:
+        kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+        kinit_command = format("{kinit_path_local} -l 5m -c {ccache_file} -kt {webhcat_keytab} {webhcat_principal}; ")
+
+        # kinit so that curl will work with --negotiate
+        Execute(kinit_command,
+                user=smokeuser,
+        )
+
+      # make a single curl call to get just the http code
+      _, stdout, stderr = shell.checked_call(['curl', '--negotiate', '-u', ':', '-sL', '-w',
+        '%{http_code}', '--connect-timeout', curl_connection_timeout,
+        '-o', '/dev/null', query_url], stderr=subprocess.PIPE, env=kerberos_env)
+
+      if stderr != '':
+        raise Exception(stderr)
+
+      # check the response code
+      response_code = int(stdout)
+
+      # 0 indicates no connection
+      if response_code == 0:
+        label = CRITICAL_CONNECTION_MESSAGE.format(query_url)
+        return (RESULT_CODE_CRITICAL, [label])
+
+      # any other response aside from 200 is a problem
+      if response_code != 200:
+        label = CRITICAL_HTTP_MESSAGE.format(response_code, query_url)
+        return (RESULT_CODE_CRITICAL, [label])
+
+      # now that we have the http status and it was 200, get the content
+      start_time = time.time()
+      _, stdout, stderr = shell.checked_call(['curl', '--negotiate', '-u', ':', '-sL',
+        '--connect-timeout', curl_connection_timeout, query_url, ],
+        stderr=subprocess.PIPE, env=kerberos_env)
+
+      total_time = time.time() - start_time
+
+      if stderr != '':
+        raise Exception(stderr)
+
+      json_response = json.loads(stdout)
+    except Exception, exception:
+      return (RESULT_CODE_CRITICAL, [str(exception)])
+  else:
+    url_response = None
+
+    try:
+      # execute the query for the JSON that includes WebHCat status
+      start_time = time.time()
+      url_response = urllib2.urlopen(query_url, timeout=connection_timeout)
+      total_time = time.time() - start_time
+
+      json_response = json.loads(url_response.read())
+    except urllib2.HTTPError as httpError:
+      label = CRITICAL_HTTP_MESSAGE.format(httpError.code, query_url)
+      return (RESULT_CODE_CRITICAL, [label])
+    except:
+      label = CRITICAL_CONNECTION_MESSAGE.format(query_url)
+      return (RESULT_CODE_CRITICAL, [label])
+    finally:
+      if url_response is not None:
+        try:
+          url_response.close()
+        except:
+          pass
+
+
+  # if status is not in the response, we can't do any check; return CRIT
+  if 'status' not in json_response:
+    return (RESULT_CODE_CRITICAL, [CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE])
+
+
+  # URL response received, parse it
+  try:
+    webhcat_status = json_response['status']
+  except:
+    return (RESULT_CODE_CRITICAL, [CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE])
+
+
+  # proper JSON received, compare against known value
+  if webhcat_status.lower() == WEBHCAT_OK_RESPONSE:
+    result_code = RESULT_CODE_OK
+    label = OK_MESSAGE.format(total_time, query_url)
+  else:
+    result_code = RESULT_CODE_CRITICAL
+    label = CRITICAL_WEBHCAT_STATUS_MESSAGE.format(webhcat_status)
+
+  return (result_code, [label])
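
For ad-hoc testing of the alert above, the execute() entry point can be driven directly with a minimal configurations dictionary. A sketch, assuming an unsecured cluster and a reachable WebHCat host; the dictionary keys mirror the token constants defined in the script, while the host name and timeout value are illustrative only (this does issue a live HTTP request).

    # Hypothetical invocation of execute() from alert_webhcat_server.py.
    sample_configurations = {
        '{{webhcat-site/templeton.port}}': '50111',
        '{{cluster-env/security_enabled}}': 'false',
        '{{cluster-env/smokeuser}}': 'ambari-qa',
    }

    result_code, labels = execute(configurations=sample_configurations,
                                  parameters={'connection.timeout': '3.0'},
                                  host_name='webhcat-host.example.com')
    print(result_code, labels[0])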

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
new file mode 100755
index 0000000..bacee9e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/etc/hive-schema-0.12.0.mysql.sql
@@ -0,0 +1,777 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  KEY `TBLS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS`
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/etc/hive-schema-0.12.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/etc/hive-schema-0.12.0.oracle.sql b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/etc/hive-schema-0.12.0.oracle.sql
new file mode 100755
index 0000000..a2cbfa2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/etc/hive-schema-0.12.0.oracle.sql
@@ -0,0 +1,717 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
+    TYPE_NAME VARCHAR2(4000) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(128) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
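
For reference, the layout above ties the core tables together through foreign keys: TBLS points at DBS via DB_ID and at SDS via SD_ID, and SDS carries the physical LOCATION. The Python sketch below walks those joins with cx_Oracle to list each table with its database and storage location; the user, password, and DSN are illustrative assumptions, not values defined anywhere in this schema.

# Minimal sketch: list Hive tables with database and storage location by
# joining TBLS -> DBS (DB_ID) and TBLS -> SDS (SD_ID), per the schema above.
# The connection user/password/DSN are placeholder assumptions.
import cx_Oracle

conn = cx_Oracle.connect("hive", "hive_password", "metastore-db:1521/ORCL")
cur = conn.cursor()
cur.execute("""
    SELECT d."NAME", t.TBL_NAME, t.TBL_TYPE, s.LOCATION
      FROM TBLS t
      JOIN DBS d ON d.DB_ID = t.DB_ID
      LEFT JOIN SDS s ON s.SD_ID = t.SD_ID
     ORDER BY d."NAME", t.TBL_NAME
""")
for db_name, tbl_name, tbl_type, location in cur:
    print(f"{db_name}.{tbl_name} ({tbl_type}) -> {location}")
cur.close()
conn.close()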


[05/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-env.xml
new file mode 100755
index 0000000..2014c78
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-env.xml
@@ -0,0 +1,189 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  
+  <!-- hive-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hive-env.sh file</description>
+    <value>
+ if [ "$SERVICE" = "cli" ]; then
+   {% if java_version &lt; 8 %}
+    if [ -z "$DEBUG" ]; then
+      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+    else
+      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+    fi
+  {% else %}
+    if [ -z "$DEBUG" ]; then
+      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit"
+    else
+      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+    fi
+  {% endif %}
+ fi
+
+# The heap size of the jvm started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hive_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# Set JAVA HOME
+export JAVA_HOME={{java64_home}}
+
+# A larger heap size may be required when running queries over a large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  A larger heap size would also be
+# appropriate for hive server (hwi etc.).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_config_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+elif [ -d "{{hcat_lib}}" ]; then
+  export HIVE_AUX_JARS_PATH={{hcat_lib}}
+fi
+
+# Set HIVE_AUX_JARS_PATH
+export HIVE_AUX_JARS_PATH={{hbase_lib}}/hbase-client.jar,\
+{{hbase_lib}}/hbase-common.jar,\
+{{hbase_lib}}/hbase-hadoop2-compat.jar,\
+{{hbase_lib}}/hbase-prefix-tree.jar,\
+{{hbase_lib}}/hbase-protocol.jar,\
+{{hbase_lib}}/hbase-server.jar,\
+{{hbase_lib}}/htrace-core-3.1.0-incubating.jar,\
+${HIVE_AUX_JARS_PATH}
+
+export METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+ 
+  <property>
+    <name>hive_security_authorization</name>
+    <display-name>Choose Authorization</display-name>
+    <value>None</value>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>None</value>
+          <label>None</label>
+        </entry>
+        <entry>
+          <value>SQLStdAuth</value>
+          <label>SQLStdAuth</label>
+        </entry>
+      </entries>
+    </value-attributes>
+  </property>
+  
+  <property>
+    <name>hive_exec_orc_storage_strategy</name>
+    <display-name>ORC Storage Strategy</display-name>
+    <value>SPEED</value>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>SPEED</value>
+          <label>Speed</label>
+        </entry>
+        <entry>
+          <value>COMPRESSION</value>
+          <label>Compression</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive_txn_acid</name>
+    <display-name>ACID Transactions</display-name>
+    <value>off</value>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>on</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>off</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  
+  <property>
+    <name>hive.heapsize</name>
+    <value>1024</value>
+    <description>Hive Java heap size</description>
+    <display-name>HiveServer2 Heap Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>2048</maximum>
+      <unit>MB</unit>
+      <increment-step>512</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.client.heapsize</name>
+    <value>512</value>
+    <description>Hive Client Java heap size</description>
+    <display-name>Client Heap Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>2048</maximum>
+      <unit>MB</unit>
+      <increment-step>512</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.metastore.heapsize</name>
+    <value>1024</value>
+    <description>Hive Metastore Java heap size</description>
+    <display-name>Metastore Heap Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>512</minimum>
+      <maximum>2048</maximum>
+      <unit>MB</unit>
+      <increment-step>512</increment-step>
+    </value-attributes>
+  </property>
+  
+</configuration>
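
The content property in this hive-env.xml is a Jinja template: Ambari substitutes values such as {{hive_heapsize}} and {{java64_home}} when it writes hive-env.sh. A small rendering sketch follows; the substitution values are illustrative assumptions, not defaults shipped with this stack (apart from hive.heapsize, whose default of 1024 is defined above).

# Minimal sketch: render a few lines of the hive-env.sh template above with
# jinja2. The values passed to render() are illustrative assumptions supplied
# by hand; in practice Ambari derives them from the cluster configuration.
from jinja2 import Template

hive_env_snippet = """
export HADOOP_HEAPSIZE="{{hive_heapsize}}"
export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
export JAVA_HOME={{java64_home}}
export HIVE_CONF_DIR={{hive_config_dir}}
export METASTORE_PORT={{hive_metastore_port}}
"""

print(Template(hive_env_snippet).render(
    hive_heapsize="1024",                  # hive.heapsize default above
    java64_home="/usr/jdk64/jdk1.8.0_77",  # assumed JDK location
    hive_config_dir="/etc/hive/conf",      # assumed config directory
    hive_metastore_port="9083",            # assumed metastore port
))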

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-site.xml
new file mode 100755
index 0000000..e1a2114
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,338 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration supports_final="true">
+  
+  <property>
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
+  </property>
+  
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>The Hive client authorization manager class name.
+    The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+    <description>
+      Setting this property to true will have HiveServer2 execute
+      Hive operations as the user making the calls to it.
+    </description>
+    <display-name>Run as end user instead of Hive user</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+ 
+  <property>
+    <name>hive.exec.reducers.bytes.per.reducer</name>
+    <value>256000000</value>
+    <description>Size per reducer. The default is 256MB, i.e. if the input size is 1GB, it will use 4 reducers.</description>
+    <display-name>Data per Reducer</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>64</minimum>
+      <maximum>4294967296</maximum>
+      <unit>B</unit>
+      <step-increment></step-increment>
+    </value-attributes>
+  </property>
+    <property>
+    <name>hive.security.metastore.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>
+      authorization manager class name to be used in the metastore for authorization.
+      The user defined authorization class should implement interface
+      org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
+    </description>
+     <display-name>Hive Authorization Manager</display-name>
+    <value-attributes>
+      <type>string</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>2147483648</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+      is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
+      converted to a mapjoin (there is no conditional task). The default is 10MB.
+    </description>
+    <display-name>For Map Join, per Map memory threshold</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1073741824</minimum>
+      <maximum>17179869184</maximum>
+      <unit>B</unit>
+      <increment-step></increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.support.dynamic.service.discovery</name>
+    <value>false</value>
+    <description>Whether HiveServer2 supports dynamic service discovery for its clients.
+      To support this, each instance of HiveServer2 currently uses ZooKeeper to register itself
+      when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble: hive.zookeeper.quorum
+      in their connection string.
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.checkinterval</name>
+    <value>100000</value>
+    <description>Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed.</description>
+  </property>
+ 
+  <property>
+    <name>hive.cbo.enable</name>
+    <value>true</value>
+    <description>Flag to control enabling Cost Based Optimizations using Calcite framework.</description>
+    <display-name>Enable Cost Based Optimizer</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.exec.orc.default.stripe.size</name>
+    <value>67108864</value>
+    <description>Define the default ORC stripe size</description>
+    <display-name>Default ORC Stripe Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>8388608</minimum>
+      <maximum>268435456</maximum>
+      <unit>B</unit>
+      <increment-step>8388608</increment-step>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.exec.orc.default.compress</name>
+    <value>ZLIB</value>
+    <description>Define the default compression codec for ORC file</description>
+    <display-name>ORC Compression Algorithm</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>ZLIB</value>
+          <label>zlib Compression Library</label>
+        </entry>
+        <entry>
+          <value>SNAPPY</value>
+          <label>Snappy Compression Library</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+    <property>
+    <name>hive.stats.fetch.column.stats</name>
+    <value>false</value>
+    <description>
+      Annotation of operator tree with statistics information requires column statistics.
+      Column statistics are fetched from metastore. Fetching column statistics for each needed column
+      can be expensive when the number of columns is high. This flag can be used to disable fetching
+      of column statistics from metastore.
+    </description>
+    <display-name>Fetch column stats at compiler</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>cost_based_optimizer</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.server2.authentication</name>
+    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
+    <value>NONE</value>
+    <display-name>HiveServer2 Authentication</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>NONE</value>
+          <label>None</label>
+        </entry>
+        <entry>
+          <value>LDAP</value>
+          <label>LDAP</label>
+        </entry>
+        <entry>
+          <value>KERBEROS</value>
+          <label>Kerberos</label>
+        </entry>
+        <entry>
+          <value>PAM</value>
+          <label>PAM</label>
+        </entry>
+        <entry>
+          <value>CUSTOM</value>
+          <label>Custom</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.server2.use.SSL</name>
+    <value>false</value>
+    <description/>
+    <display-name>Use SSL</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hive.execution.engine</name>
+    <value>mr</value>
+    <description>
+      Chooses the execution engine. The available option is mr (MapReduce, default).
+    </description>
+    <display-name>hive.execution.engine</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>mr</value>
+          <label>MapReduce</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+    <property>
+    <name>hive.compactor.initiator.on</name>
+    <value>false</value>
+    <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the thrift metastore this should only be set to true for one of them.</description>
+    <display-name>Run Compactor</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hive.compactor.worker.threads</name>
+    <value>0</value>
+    <description>Number of compactor worker threads to run on this metastore instance. The value can differ across metastore instances.</description>
+    <display-name>Number of threads used by Compactor</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>20</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+    <property>
+    <name>hive.default.fileformat</name>
+    <value>TextFile</value>
+    <description>Default file format for CREATE TABLE statement.</description>
+    <display-name>Default File Format</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>ORC</value>
+          <description>The Optimized Row Columnar (ORC) file format provides a highly efficient way to store Hive data. It was designed to overcome limitations of the other Hive file formats. Using ORC files improves performance when Hive is reading, writing, and processing data.</description>
+        </entry>
+        <entry>
+          <value>TextFile</value>
+          <description>Text file format saves Hive data as normal text.</description>
+        </entry>
+      </entries>
+    </value-attributes>
+  </property>
+</configuration>
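
The hive.exec.reducers.bytes.per.reducer description above amounts to a ceiling division of the input size by the per-reducer target. A rough sketch of that arithmetic follows; it illustrates the sizing rule only and ignores the additional caps Hive's planner applies to the final reducer count.

# Rough sketch of the sizing rule described for
# hive.exec.reducers.bytes.per.reducer: 1GB of input at 256MB per reducer
# yields roughly 4 reducers. Hive applies further limits not modeled here.
import math

def estimated_reducers(input_bytes, bytes_per_reducer=256_000_000):
    return max(1, math.ceil(input_bytes / bytes_per_reducer))

print(estimated_reducers(1_000_000_000))  # -> 4, matching the description above
print(estimated_reducers(100_000_000))    # -> 1, small inputs still get one reducer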

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/metainfo.xml
new file mode 100755
index 0000000..db7d590
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/metainfo.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <version>1.2.1</version>
+
+      <components>
+        <component>
+          <name>HIVE_METASTORE</name>
+          <cardinality>1+</cardinality>          
+        </component>
+
+        <component>
+          <name>HIVE_SERVER</name>
+          <cardinality>1+</cardinality>
+        </component>
+        
+        <component>
+          <name>WEBHCAT_SERVER</name>
+          <cardinality>1+</cardinality>          
+        </component>        
+      </components>
+      
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hive_4_1_*</name>
+            </package>
+            <package>
+              <name>hive_4_1_*-hcatalog</name>
+            </package>
+            <package>
+              <name>hive_4_1_*-webhcat</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat6,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat7</osFamily>
+          <packages>
+            <package>
+              <name>mariadb</name>
+            </package>
+            <package>
+              <name>mariadb-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/themes/theme.json b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/themes/theme.json
new file mode 100755
index 0000000..5143782
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/HIVE/themes/theme.json
@@ -0,0 +1,327 @@
+{
+  "name": "default",
+  "description": "Default theme for HIVE service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "settings",
+            "display-name": "Settings",
+            "layout": {
+              "tab-rows": 2,
+              "tab-columns": 2,
+              "sections": [
+                 {
+                  "name": "acid-transactions",
+                  "display-name": "ACID Transactions",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "acid-transactions-row1-col1-1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "security",
+                  "display-name": "Security",
+                  "row-index": "0",
+                  "column-index": "1",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "security-row1-col1",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "optimization",
+                  "display-name": "Optimization",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "2",
+                  "section-columns": "2",
+                  "section-rows": "2",
+                  "subsections": [
+                    {
+                      "name": "optimization-row1-col1",
+                      "display-name": "Execute Engine",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "optimization-row1-col2",
+                      "display-name": "CBO",
+                      "row-index": "0",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "optimization-row2-col1",
+                      "display-name": "Storage",
+                      "row-index": "1",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    },
+                    {
+                      "name": "optimization-row2-col2",
+                      "display-name": "Memory",
+                      "row-index": "1",
+                      "column-index": "1",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "hive-site/hive.exec.orc.default.stripe.size",
+          "subsection-name": "optimization-row2-col1"
+        },
+        {
+          "config": "hive-site/hive.exec.orc.default.compress",
+          "subsection-name": "optimization-row2-col1"
+        },
+        {
+          "config": "hive-env/hive_exec_orc_storage_strategy",
+          "subsection-name": "optimization-row2-col1"
+        },
+        {
+          "config": "hive-env/hive.heapsize",
+          "subsection-name": "optimization-row2-col2"
+        },
+        {
+          "config": "hive-env/hive.metastore.heapsize",
+          "subsection-name": "optimization-row2-col2"
+        },
+        {
+          "config": "hive-env/hive.client.heapsize",
+          "subsection-name": "optimization-row2-col2"
+        },
+        {
+          "config": "hive-site/hive.auto.convert.join.noconditionaltask.size",
+          "subsection-name": "optimization-row2-col2"
+        },
+        {
+          "config": "hive-site/hive.exec.reducers.bytes.per.reducer",
+          "subsection-name": "optimization-row2-col2"
+        },
+        {
+          "config": "hive-env/hive_txn_acid",
+          "subsection-name": "acid-transactions-row1-col1-1"
+        },
+        {
+          "config": "hive-site/hive.compactor.initiator.on",
+          "subsection-name": "acid-transactions-row1-col1-1"
+        },
+        {
+          "config": "hive-site/hive.compactor.worker.threads",
+          "subsection-name": "acid-transactions-row1-col1-1"
+        },
+        {
+          "config": "hive-site/hive.execution.engine",
+          "subsection-name": "optimization-row1-col1"
+        },
+        {
+          "config": "hive-site/hive.cbo.enable",
+          "subsection-name": "optimization-row1-col2"
+        },
+        {
+          "config": "hive-site/hive.stats.fetch.column.stats",
+          "subsection-name": "optimization-row1-col2"
+        },
+        {
+          "config": "hive-env/hive_security_authorization",
+          "subsection-name": "security-row1-col1"
+        },
+        {
+          "config": "hive-site/hive.server2.enable.doAs",
+          "subsection-name": "security-row1-col1"
+        },
+        {
+          "config": "hive-site/hive.server2.authentication",
+          "subsection-name": "security-row1-col1"
+        },
+        {
+          "config": "hive-site/hive.server2.use.SSL",
+          "subsection-name": "security-row1-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "hive-site/hive.exec.orc.default.stripe.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.exec.orc.default.compress",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hive-env/hive_exec_orc_storage_strategy",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-env/hive.heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-env/hive.metastore.heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-env/hive.client.heapsize",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.auto.convert.join.noconditionaltask.size",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.exec.reducers.bytes.per.reducer",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "MB"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-env/hive_txn_acid",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.compactor.initiator.on",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.compactor.worker.threads",
+        "widget": {
+          "type": "slider",
+          "units": [
+            {
+              "unit-name": "int"
+            }
+          ]
+        }
+      },
+      {
+        "config": "hive-site/hive.execution.engine",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hive-site/hive.cbo.enable",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.stats.fetch.column.stats",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-site/hive.server2.enable.doAs",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "hive-env/hive_security_authorization",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hive-site/hive.server2.authentication",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "hive-site/hive.server2.use.SSL",
+        "widget": {
+          "type": "toggle"
+        }
+      }
+    ]
+  }
+}
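
In the theme above, each entry under widgets is expected to pair with a placement entry for the same config key. The sketch below cross-checks the two lists; the file path is an assumption taken from the repository layout shown in this commit.

# Minimal sketch: verify that every config referenced by a widget in the HIVE
# theme.json above also has a placement entry, and vice versa. The path is an
# assumption based on the layout of this commit.
import json

path = ("ambari-server/src/main/resources/stacks/BigInsights/4.1/"
        "services/HIVE/themes/theme.json")
with open(path) as f:
    theme = json.load(f)

placed  = {c["config"] for c in theme["configuration"]["placement"]["configs"]}
widgets = {w["config"] for w in theme["configuration"]["widgets"]}

print("widgets without placement:", sorted(widgets - placed))
print("placements without widget:", sorted(placed - widgets))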

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KAFKA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KAFKA/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KAFKA/metainfo.xml
new file mode 100755
index 0000000..0a919a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KAFKA/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KAFKA</name>
+      <version>0.8.2.1</version>
+    </service>
+  </services>
+</metainfo>
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KERBEROS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KERBEROS/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KERBEROS/metainfo.xml
new file mode 100755
index 0000000..e4b2567
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KERBEROS/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KERBEROS</name>
+      <version>1.10.3</version>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KNOX/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KNOX/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KNOX/metainfo.xml
new file mode 100755
index 0000000..4c110cc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/KNOX/metainfo.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KNOX</name>
+      <displayName>Knox</displayName>
+      <comment>Provides a single point of authentication and access for Apache Hadoop services in a cluster</comment>
+      <version>0.6.0</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>knox_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>knox_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/OOZIE/configuration/oozie-site.xml
new file mode 100755
index 0000000..ff82a35
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/OOZIE/configuration/oozie-site.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<configuration supports_final="true">
+    <property>
+        <name>oozie.services</name>
+        <value>
+            org.apache.oozie.service.SchedulerService,
+            org.apache.oozie.service.InstrumentationService,
+            org.apache.oozie.service.MemoryLocksService,
+            org.apache.oozie.service.UUIDService,
+            org.apache.oozie.service.ELService,
+            org.apache.oozie.service.AuthorizationService,
+            org.apache.oozie.service.UserGroupInformationService,
+            org.apache.oozie.service.HadoopAccessorService,
+            org.apache.oozie.service.JobsConcurrencyService,
+            org.apache.oozie.service.URIHandlerService,
+            org.apache.oozie.service.DagXLogInfoService,
+            org.apache.oozie.service.SchemaService,
+            org.apache.oozie.service.LiteWorkflowAppService,
+            org.apache.oozie.service.JPAService,
+            org.apache.oozie.service.StoreService,
+            org.apache.oozie.service.SLAStoreService,
+            org.apache.oozie.service.DBLiteWorkflowStoreService,
+            org.apache.oozie.service.CallbackService,
+            org.apache.oozie.service.ActionService,
+            org.apache.oozie.service.ShareLibService,
+            org.apache.oozie.service.CallableQueueService,
+            org.apache.oozie.service.ActionCheckerService,
+            org.apache.oozie.service.RecoveryService,
+            org.apache.oozie.service.PurgeService,
+            org.apache.oozie.service.CoordinatorEngineService,
+            org.apache.oozie.service.BundleEngineService,
+            org.apache.oozie.service.DagEngineService,
+            org.apache.oozie.service.CoordMaterializeTriggerService,
+            org.apache.oozie.service.StatusTransitService,
+            org.apache.oozie.service.PauseTransitService,
+            org.apache.oozie.service.GroupsService,
+            org.apache.oozie.service.ProxyUserService,
+            org.apache.oozie.service.XLogStreamingService,
+            org.apache.oozie.service.JvmPauseMonitorService,
+            org.apache.oozie.service.SparkConfigurationService
+        </value>
+        <description>
+            All services to be created and managed by Oozie Services singleton.
+            Class names must be separated by commas.
+        </description>
+    </property>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/OOZIE/metainfo.xml
new file mode 100755
index 0000000..c211f3a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/OOZIE/metainfo.xml
@@ -0,0 +1,144 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <version>4.2.0</version>
+       <components>
+        <component>
+          <name>OOZIE_SERVER</name>
+          <displayName>Oozie Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>OOZIE_CLIENT</name>
+          <displayName>Oozie Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>oozie-site.xml</fileName>
+              <dictionaryName>oozie-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>oozie-env.sh</fileName>
+              <dictionaryName>oozie-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>oozie-log4j.properties</fileName>
+              <dictionaryName>oozie-log4j</dictionaryName>
+            </configFile>            
+          </configFiles>
+        </component>
+       </components>
+       <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zip</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <name>extjs</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat7,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>oozie_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>oozie_4_1_*</name>
+            </package>
+            <package>
+              <name>libxml2-utils</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>
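
For readers new to the stack layout: each service's metainfo.xml declares the service version, its components with category and cardinality, host-level dependencies, and per-OS packages, all of which the Ambari server reads when it loads the stack definition. As a rough illustration (not part of this commit, and the checkout path below is only an example), a metainfo file like the one above can be inspected with Python's standard library:

# Hypothetical helper: summarize the components declared in a metainfo.xml.
# The path assumes a local source checkout; adjust as needed.
import xml.etree.ElementTree as ET

def summarize_metainfo(path):
    root = ET.parse(path).getroot()
    for component in root.iter("component"):
        name = component.findtext("name")
        category = component.findtext("category")
        cardinality = component.findtext("cardinality")
        deps = [d.findtext("name") for d in component.iter("dependency")]
        print("%s (%s, cardinality %s) depends on: %s"
              % (name, category, cardinality, ", ".join(deps) or "nothing"))

summarize_metainfo("ambari-server/src/main/resources/stacks/"
                   "BigInsights/4.1/services/OOZIE/metainfo.xml")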

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/PIG/metainfo.xml
new file mode 100755
index 0000000..544a1ad
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/PIG/metainfo.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <version>0.15.0</version>
+      
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>pig_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SLIDER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SLIDER/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SLIDER/metainfo.xml
new file mode 100755
index 0000000..944a38c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SLIDER/metainfo.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SLIDER</name>
+      <version>0.80.0</version>
+      
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>slider_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>slider_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SOLR/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SOLR/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SOLR/metainfo.xml
new file mode 100755
index 0000000..cb591b2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SOLR/metainfo.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SOLR</name>
+      <version>5.1.0</version>
+    </service>
+  </services>
+</metainfo>
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SPARK/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SPARK/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SPARK/metainfo.xml
new file mode 100755
index 0000000..d8f5920
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SPARK/metainfo.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SPARK</name>
+      <version>1.4.1</version>
+      
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>spark-core_4_1_0*</name>
+            </package>
+            <package>
+              <name>spark_4_1_0*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>spark-core_4_1_0*</name>
+            </package>
+            <package>
+               <name>spark_4_1_0*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SQOOP/metainfo.xml
new file mode 100755
index 0000000..7e247ca
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/SQOOP/metainfo.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <version>1.4.6</version>
+      
+       <requiredServices>
+        <service>YARN</service>
+       </requiredServices>
+
+       <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>sqoop_4_1_*</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/YARN_widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/YARN_widgets.json b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/YARN_widgets.json
new file mode 100755
index 0000000..5179470
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/YARN_widgets.json
@@ -0,0 +1,676 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_yarn_dashboard",
+      "display_name": "Standard YARN Dashboard",
+      "section_name": "YARN_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Memory Utilization",
+          "description": "Percentage of total memory allocated to containers running in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AllocatedMB",
+              "metric_path": "metrics/yarn/Queue/root/AllocatedMB",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AvailableMB",
+              "metric_path": "metrics/yarn/Queue/root/AvailableMB",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Memory Utilization",
+              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedMB / (yarn.QueueMetrics.Queue=root.AllocatedMB + yarn.QueueMetrics.Queue=root.AvailableMB)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "CPU Utilization",
+          "description": "Percentage of total virtual cores allocated to containers running in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AllocatedVCores",
+              "metric_path": "metrics/yarn/Queue/root/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AvailableVCores",
+              "metric_path": "metrics/yarn/Queue/root/AvailableVCores",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable CPU Utilized across NodeManager",
+              "value": "${(yarn.QueueMetrics.Queue=root.AllocatedVCores / (yarn.QueueMetrics.Queue=root.AllocatedVCores + yarn.QueueMetrics.Queue=root.AvailableVCores)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Bad Local Disks",
+          "description": "Number of unhealthy local disks across all NodeManagers.",
+          "widget_type": "NUMBER",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.BadLocalDirs",
+              "metric_path": "metrics/yarn/BadLocalDirs",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.BadLogDirs",
+              "metric_path": "metrics/yarn/BadLogDirs",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Number of unhealthy local disks for NodeManager",
+              "value": "${yarn.NodeManagerMetrics.BadLocalDirs + yarn.NodeManagerMetrics.BadLogDirs}"
+            }
+          ],
+          "properties": {
+            "display_unit": ""
+          }
+        },
+        {
+          "widget_name": "Container Failures",
+          "description": "Percentage of all containers failing in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersFailed._sum",
+              "metric_path": "metrics/yarn/ContainersFailed._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted._sum",
+              "metric_path": "metrics/yarn/ContainersCompleted._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched._sum",
+              "metric_path": "metrics/yarn/ContainersLaunched._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersIniting._sum",
+              "metric_path": "metrics/yarn/ContainersIniting._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersKilled._sum",
+              "metric_path": "metrics/yarn/ContainersKilled._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersRunning._sum",
+              "metric_path": "metrics/yarn/ContainersRunning._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Container Failures",
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed._sum/(yarn.NodeManagerMetrics.ContainersFailed._sum + yarn.NodeManagerMetrics.ContainersCompleted._sum + yarn.NodeManagerMetrics.ContainersLaunched._sum + yarn.NodeManagerMetrics.ContainersIniting._sum + yarn.NodeManagerMetrics.ContainersKilled._sum + yarn.NodeManagerMetrics.ContainersRunning._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "App Failures",
+          "description": "Percentage of all launched applications failing in the cluster.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsFailed",
+              "metric_path": "metrics/yarn/Queue/root/AppsFailed",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsKilled",
+              "metric_path": "metrics/yarn/Queue/root/AppsKilled",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
+              "metric_path": "metrics/yarn/Queue/root/AppsPending",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsRunning",
+              "metric_path": "metrics/yarn/Queue/root/AppsRunning",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsSubmitted",
+              "metric_path": "metrics/yarn/Queue/root/AppsSubmitted",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            },
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsCompleted",
+              "metric_path": "metrics/yarn/Queue/root/AppsCompleted",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "App Failures",
+              "value": "${(yarn.QueueMetrics.Queue=root.AppsFailed/(yarn.QueueMetrics.Queue=root.AppsFailed + yarn.QueueMetrics.Queue=root.AppsKilled + yarn.QueueMetrics.Queue=root.AppsPending + yarn.QueueMetrics.Queue=root.AppsRunning + yarn.QueueMetrics.Queue=root.AppsSubmitted + yarn.QueueMetrics.Queue=root.AppsCompleted)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Pending Apps",
+          "description": "Count of applications waiting for cluster resources to become available.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.QueueMetrics.Queue=root.AppsPending",
+              "metric_path": "metrics/yarn/Queue/root/AppsPending",
+              "service_name": "YARN",
+              "component_name": "RESOURCEMANAGER",
+              "host_component_criteria": "host_components/HostRoles/ha_state=ACTIVE"
+            }
+          ],
+          "values": [
+            {
+              "name": "Pending Apps",
+              "value": "${yarn.QueueMetrics.Queue=root.AppsPending}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Apps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Memory",
+          "description": "Percentage of memory used across all NodeManager hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "mem_total._sum",
+              "metric_path": "metrics/memory/mem_total._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "mem_free._sum",
+              "metric_path": "metrics/memory/mem_free._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "mem_cached._sum",
+              "metric_path": "metrics/memory/mem_cached._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Memory utilization",
+              "value": "${((mem_total._sum - mem_free._sum - mem_cached._sum)/mem_total._sum) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Disk",
+          "description": "Sum of disk throughput for all NodeManager hosts.",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "read_bps._sum",
+              "metric_path": "metrics/disk/read_bps._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "write_bps._sum",
+              "metric_path": "metrics/disk/write_bps._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Read throughput",
+              "value": "${read_bps._sum/1048576}"
+            },
+            {
+              "name": "Write throughput",
+              "value": "${write_bps._sum/1048576}"
+            }
+          ],
+          "properties": {
+            "display_unit": "Mbps",
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster Network",
+          "description": "Average of Network utilized across all NodeManager hosts.",
+          "default_section_name": "YARN_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "pkts_in._avg",
+              "metric_path": "metrics/network/pkts_in._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "pkts_out._avg",
+              "metric_path": "metrics/network/pkts_out._avg",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Packets In",
+              "value": "${pkts_in._avg}"
+            },
+            {
+              "name": "Packets Out",
+              "value": "${pkts_out._avg}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1"
+          }
+        },
+        {
+          "widget_name": "Cluster CPU",
+          "description": "Percentage of CPU utilized across all NodeManager hosts.",
+          "default_section_name": "YARN_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "cpu_system._sum",
+              "metric_path": "metrics/cpu/cpu_system._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_user._sum",
+              "metric_path": "metrics/cpu/cpu_user._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_nice._sum",
+              "metric_path": "metrics/cpu/cpu_nice._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_idle._sum",
+              "metric_path": "metrics/cpu/cpu_idle._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "cpu_wio._sum",
+              "metric_path": "metrics/cpu/cpu_wio._sum",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "CPU utilization",
+              "value": "${((cpu_system._sum + cpu_user._sum + cpu_nice._sum)/(cpu_system._sum + cpu_user._sum + cpu_nice._sum + cpu_idle._sum + cpu_wio._sum)) * 100}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        }
+      ]
+    },
+    {
+      "layout_name": "default_yarn_heatmap",
+      "display_name": "YARN Heatmaps",
+      "section_name": "YARN_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "YARN local disk space utilization per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc",
+              "metric_path": "metrics/yarn/GoodLocalDirsDiskUtilizationPerc",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc",
+              "metric_path": "metrics/yarn/GoodLogDirsDiskUtilizationPerc",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "YARN local disk space utilization per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc + yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc)/2}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Total Allocatable RAM Utilized per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "metric_path": "metrics/yarn/AllocatedGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.AvailableGB",
+              "metric_path": "metrics/yarn/AvailableGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable RAM Utilized per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.AllocatedGB/(yarn.NodeManagerMetrics.AvailableGB + yarn.NodeManagerMetrics.AllocatedGB)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Total Allocatable CPU Utilized per NodeManager",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "metric_path": "metrics/yarn/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.AvailableVCores",
+              "metric_path": "metrics/yarn/AvailableVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Total Allocatable CPU Utilized per NodeManager",
+              "value": "${(yarn.NodeManagerMetrics.AllocatedVCores/(yarn.NodeManagerMetrics.AllocatedVCores + yarn.NodeManagerMetrics.AvailableVCores)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Container Failures",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersFailed",
+              "metric_path": "metrics/yarn/ContainersFailed",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersCompleted",
+              "metric_path": "metrics/yarn/ContainersCompleted",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersLaunched",
+              "metric_path": "metrics/yarn/ContainersLaunched",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersIniting",
+              "metric_path": "metrics/yarn/ContainersIniting",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersKilled",
+              "metric_path": "metrics/yarn/ContainersKilled",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            },
+            {
+              "name": "yarn.NodeManagerMetrics.ContainersRunning",
+              "metric_path": "metrics/yarn/ContainersRunning",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Container Failures",
+              "value": "${(yarn.NodeManagerMetrics.ContainersFailed/(yarn.NodeManagerMetrics.ContainersFailed + yarn.NodeManagerMetrics.ContainersCompleted + yarn.NodeManagerMetrics.ContainersLaunched + yarn.NodeManagerMetrics.ContainersIniting + yarn.NodeManagerMetrics.ContainersKilled + yarn.NodeManagerMetrics.ContainersRunning)) * 100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager GC Time",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+              "metric_path": "metrics/jvm/gcTimeMillis",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager Garbage Collection Time",
+              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis}"
+            }
+          ],
+          "properties": {
+            "display_unit": "ms",
+            "max_limit": "10000"
+          }
+        },
+        {
+          "widget_name": "NodeManager JVM Heap Memory Used",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+              "metric_path": "metrics/jvm/memHeapUsedM",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager JVM Heap Memory Used",
+              "value": "${Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM}"
+            }
+          ],
+          "properties": {
+            "display_unit": "MB",
+            "max_limit": "512"
+          }
+        },
+        {
+          "widget_name": "Allocated Containers",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedContainers",
+              "metric_path": "metrics/yarn/AllocatedContainers",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "Allocated Containers",
+              "value": "${yarn.NodeManagerMetrics.AllocatedContainers}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager RAM Utilized",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedGB",
+              "metric_path": "metrics/yarn/AllocatedGB",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager RAM Utilized",
+              "value": "${yarn.NodeManagerMetrics.AllocatedGB}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "NodeManager CPU Utilized",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "yarn.NodeManagerMetrics.AllocatedVCores",
+              "metric_path": "metrics/yarn/AllocatedVCores",
+              "service_name": "YARN",
+              "component_name": "NODEMANAGER"
+            }
+          ],
+          "values": [
+            {
+              "name": "NodeManager CPU Utilized",
+              "value": "${yarn.NodeManagerMetrics.AllocatedVCores}"
+            }
+          ],
+          "properties": {
+            "display_unit": "",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
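
A note on the widget definitions above: each "values" entry is a template expression that the Ambari widget framework evaluates against the metrics listed for that widget before rendering. As a rough, standalone illustration (the numbers are made up and the real evaluation happens server-side, not in this snippet), the Memory Utilization expression reduces to plain arithmetic:

# Illustrative only: the Memory Utilization widget expression
# ${(AllocatedMB / (AllocatedMB + AvailableMB)) * 100} with sample values.
allocated_mb = 6144.0   # yarn.QueueMetrics.Queue=root.AllocatedMB (sample)
available_mb = 10240.0  # yarn.QueueMetrics.Queue=root.AvailableMB (sample)

memory_utilization = (allocated_mb / (allocated_mb + available_mb)) * 100
print("Memory Utilization: %.1f%%" % memory_utilization)  # prints 37.5%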

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/configuration-mapred/mapred-site.xml
new file mode 100755
index 0000000..bd5a911
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/configuration-mapred/mapred-site.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <property>
+    <name>mapreduce.jobhistory.recovery.enable</name>
+    <value>true</value>
+    <description>Enable the history server to store server state and recover
+      server state upon startup.  If enabled then
+      mapreduce.jobhistory.recovery.store.class must be specified.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.recovery.store.class</name>
+    <value>org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService</value>
+    <description>The HistoryServerStateStoreService class to store history server
+      state for recovery.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.recovery.store.leveldb.path</name>
+    <value>/hadoop/mapreduce/jhs</value>
+    <description>The URI where history server state will be stored if HistoryServerLeveldbStateStoreService
+      is configured as the recovery storage class.
+    </description>
+  </property>
+
+</configuration>
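
The description of mapreduce.jobhistory.recovery.enable states a constraint that is easy to miss when editing configs by hand: recovery only takes effect if a store class is also configured. A small, hypothetical sanity check (the dictionary simply mirrors the three properties above; this helper is not part of Ambari) makes the rule concrete:

# Hypothetical sanity check for the JobHistory Server recovery settings above.
def check_jhs_recovery(props):
    enabled = props.get("mapreduce.jobhistory.recovery.enable", "false").lower() == "true"
    store_class = props.get("mapreduce.jobhistory.recovery.store.class", "")
    if enabled and not store_class:
        raise ValueError("recovery.enable is true but recovery.store.class is not set")
    return enabled

mapred_site = {
    "mapreduce.jobhistory.recovery.enable": "true",
    "mapreduce.jobhistory.recovery.store.class":
        "org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService",
    "mapreduce.jobhistory.recovery.store.leveldb.path": "/hadoop/mapreduce/jhs",
}
check_jhs_recovery(mapred_site)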

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/configuration/yarn-site.xml
new file mode 100755
index 0000000..12a8a21
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.1/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <property>
+    <name>yarn.node-labels.manager-class</name>
+    <deleted>true</deleted>
+  </property>
+
+  <property>
+    <name>yarn.timeline-service.recovery.enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>yarn.admin.acl</name>
+    <value>yarn</value>
+    <description> ACL of who can be admin of the YARN cluster. </description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+
+</configuration>
\ No newline at end of file


http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/alerts/alert_ambari_metrics_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/alerts/alert_ambari_metrics_monitor.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/alerts/alert_ambari_metrics_monitor.py
new file mode 100755
index 0000000..fa44a7f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/alerts/alert_ambari_metrics_monitor.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+import socket
+
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.core.exceptions import ComponentIsNotRunning
+from ambari_commons import OSCheck, OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+if OSCheck.is_windows_family():
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+AMS_MONITOR_PID_DIR = '{{ams-env/metrics_monitor_pid_dir}}'
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (AMS_MONITOR_PID_DIR,)
+
+@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
+def is_monitor_process_live(pid_file=None):
+  """
+  Gets whether the Metrics Monitor Service is running.
+  :param pid_file: ignored
+  :return: True if the monitor is running, False otherwise
+  """
+  try:
+    check_windows_service_status("AmbariMetricsHostMonitoring")
+    ams_monitor_process_running = True
+  except:
+    ams_monitor_process_running = False
+  return ams_monitor_process_running
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def is_monitor_process_live(pid_file):
+  """
+  Gets whether the Metrics Monitor represented by the specified file is running.
+  :param pid_file: the PID file of the monitor to check
+  :return: True if the monitor is running, False otherwise
+  """
+  live = False
+
+  try:
+    check_process_status(pid_file)
+    live = True
+  except ComponentIsNotRunning:
+    pass
+
+  return live
+
+
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if configurations is None:
+    return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  if set([AMS_MONITOR_PID_DIR]).issubset(configurations):
+    AMS_MONITOR_PID_PATH = os.path.join(configurations[AMS_MONITOR_PID_DIR], 'ambari-metrics-monitor.pid')
+  else:
+    return (RESULT_CODE_UNKNOWN, ['The ams_monitor_pid_dir is a required parameter.'])
+
+  if host_name is None:
+    host_name = socket.getfqdn()
+
+  ams_monitor_process_running = is_monitor_process_live(AMS_MONITOR_PID_PATH)
+
+  alert_state = RESULT_CODE_OK if ams_monitor_process_running else RESULT_CODE_CRITICAL
+
+  alert_label = 'Ambari Monitor is running on {0}' if ams_monitor_process_running else 'Ambari Monitor is NOT running on {0}'
+  alert_label = alert_label.format(host_name)
+
+  return (alert_state, [alert_label])
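
For context, Ambari's alert framework calls get_tokens() to learn which {{site/property}} values the script needs, resolves those tokens from the cluster configuration, and then calls execute() with the resulting dictionary. A minimal, hypothetical driver (it assumes the script above is importable as alert_ambari_metrics_monitor on a host where the resource_management and ambari_commons libraries are available, and the pid directory shown is only an example) would look like this:

# Hypothetical driver mimicking how the alert framework invokes this script.
from alert_ambari_metrics_monitor import execute

# Maps the {{ams-env/metrics_monitor_pid_dir}} token (from get_tokens)
# to its resolved value; the path shown is an example, not a requirement.
configurations = {
    "{{ams-env/metrics_monitor_pid_dir}}": "/var/run/ambari-metrics-monitor"
}

state, labels = execute(configurations=configurations, host_name="nm01.example.com")
print("%s: %s" % (state, labels[0]))  # e.g. OK: Ambari Monitor is running on nm01.example.com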

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/hbaseSmokeVerify.sh
new file mode 100755
index 0000000..5c320c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+conf_dir=$1
+data=$2
+hbase_cmd=$3
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+grep -q "$data" /tmp/hbase_chk_verify
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
+grep -q '1 row(s)' /tmp/hbase_chk_verify

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/AMBARI_METRICS.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/AMBARI_METRICS.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/AMBARI_METRICS.txt
new file mode 100755
index 0000000..6693503
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/AMBARI_METRICS.txt
@@ -0,0 +1,245 @@
+regionserver.WAL.SyncTime_min
+regionserver.WAL.SyncTime_num_ops
+regionserver.WAL.appendCount
+regionserver.WAL.slowAppendCount
+jvm.JvmMetrics.GcTimeMillis
+jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep
+jvm.JvmMetrics.GcTimeMillisParNew
+ugi.UgiMetrics.GetGroupsAvgTime
+ugi.UgiMetrics.GetGroupsNumOps
+ugi.UgiMetrics.LoginFailureNumOps
+ugi.UgiMetrics.LoginSuccessAvgTime
+ugi.UgiMetrics.LoginSuccessNumOps
+ugi.UgiMetrics.LoginFailureAvgTime
+jvm.JvmMetrics.LogError
+jvm.JvmMetrics.LogFatal
+jvm.JvmMetrics.LogInfo
+jvm.JvmMetrics.LogWarn
+jvm.JvmMetrics.MemHeapCommittedM
+jvm.JvmMetrics.MemHeapMaxM
+jvm.JvmMetrics.MemHeapUsedM
+jvm.JvmMetrics.MemMaxM
+jvm.JvmMetrics.MemNonHeapCommittedM
+jvm.JvmMetrics.MemNonHeapMaxM
+jvm.JvmMetrics.MemNonHeapUsedM
+jvm.JvmMetrics.ThreadsBlocked
+jvm.JvmMetrics.ThreadsNew
+jvm.JvmMetrics.ThreadsRunnable
+jvm.JvmMetrics.ThreadsTerminated
+jvm.JvmMetrics.ThreadsTimedWaiting
+master.AssignmentManger.Assign_75th_percentile
+master.AssignmentManger.Assign_95th_percentile
+master.AssignmentManger.Assign_99th_percentile
+master.AssignmentManger.Assign_max
+master.AssignmentManger.Assign_mean
+master.AssignmentManger.Assign_median
+master.AssignmentManger.Assign_min
+jvm.JvmMetrics.ThreadsWaiting
+master.AssignmentManger.Assign_num_ops
+master.AssignmentManger.BulkAssign_75th_percentile
+master.AssignmentManger.BulkAssign_95th_percentile
+master.AssignmentManger.BulkAssign_99th_percentile
+master.AssignmentManger.BulkAssign_max
+master.AssignmentManger.BulkAssign_mean
+master.AssignmentManger.BulkAssign_median
+master.AssignmentManger.BulkAssign_min
+master.AssignmentManger.BulkAssign_num_ops
+master.AssignmentManger.ritCount
+master.AssignmentManger.ritCountOverThreshold
+master.AssignmentManger.ritOldestAge
+master.Balancer.BalancerCluster_75th_percentile
+master.Balancer.BalancerCluster_95th_percentile
+master.Balancer.BalancerCluster_99th_percentile
+master.Balancer.BalancerCluster_max
+master.Balancer.BalancerCluster_mean
+master.Balancer.BalancerCluster_median
+master.Balancer.BalancerCluster_min
+master.Balancer.BalancerCluster_num_ops
+master.Balancer.miscInvocationCount
+master.FileSystem.HlogSplitSize_75th_percentile
+master.FileSystem.HlogSplitSize_95th_percentile
+master.FileSystem.HlogSplitSize_99th_percentile
+master.FileSystem.HlogSplitSize_max
+master.FileSystem.HlogSplitSize_mean
+master.FileSystem.HlogSplitSize_median
+master.FileSystem.HlogSplitSize_min
+master.FileSystem.HlogSplitSize_num_ops
+master.FileSystem.HlogSplitTime_75th_percentile
+master.FileSystem.HlogSplitTime_95th_percentile
+master.FileSystem.HlogSplitTime_99th_percentile
+master.FileSystem.HlogSplitTime_max
+master.FileSystem.HlogSplitTime_mean
+master.FileSystem.HlogSplitTime_median
+master.FileSystem.HlogSplitTime_min
+master.FileSystem.HlogSplitTime_num_ops
+master.FileSystem.MetaHlogSplitSize_75th_percentile
+master.FileSystem.MetaHlogSplitSize_95th_percentile
+master.FileSystem.MetaHlogSplitSize_99th_percentile
+master.FileSystem.MetaHlogSplitSize_max
+master.FileSystem.MetaHlogSplitSize_mean
+master.FileSystem.MetaHlogSplitSize_median
+master.FileSystem.MetaHlogSplitSize_min
+master.FileSystem.MetaHlogSplitSize_num_ops
+master.FileSystem.MetaHlogSplitTime_75th_percentile
+master.FileSystem.MetaHlogSplitTime_95th_percentile
+master.FileSystem.MetaHlogSplitTime_99th_percentile
+master.FileSystem.MetaHlogSplitTime_max
+master.FileSystem.MetaHlogSplitTime_mean
+master.FileSystem.MetaHlogSplitTime_median
+master.FileSystem.MetaHlogSplitTime_min
+master.FileSystem.MetaHlogSplitTime_num_ops
+master.Server.averageLoad
+master.Server.clusterRequests
+master.Server.masterActiveTime
+master.Server.masterStartTime
+master.Server.numDeadRegionServers
+master.Server.numRegionServers
+metricssystem.MetricsSystem.DroppedPubAll
+metricssystem.MetricsSystem.NumActiveSinks
+ipc.IPC.ProcessCallTime_75th_percentile
+ipc.IPC.ProcessCallTime_95th_percentile
+metricssystem.MetricsSystem.NumActiveSources
+metricssystem.MetricsSystem.NumAllSinks
+ipc.IPC.ProcessCallTime_99th_percentile
+metricssystem.MetricsSystem.NumAllSources
+metricssystem.MetricsSystem.PublishAvgTime
+metricssystem.MetricsSystem.PublishNumOps
+ipc.IPC.ProcessCallTime_max
+ipc.IPC.ProcessCallTime_mean
+metricssystem.MetricsSystem.Sink_timelineAvgTime
+ipc.IPC.ProcessCallTime_median
+metricssystem.MetricsSystem.Sink_timelineDropped
+metricssystem.MetricsSystem.Sink_timelineNumOps
+ipc.IPC.ProcessCallTime_num_ops
+metricssystem.MetricsSystem.Sink_timelineQsize
+metricssystem.MetricsSystem.SnapshotAvgTime
+ipc.IPC.QueueCallTime_95th_percentile
+metricssystem.MetricsSystem.SnapshotNumOps
+ipc.IPC.ProcessCallTime_min
+ipc.IPC.QueueCallTime_75th_percentile
+ipc.IPC.QueueCallTime_99th_percentile
+ipc.IPC.QueueCallTime_max
+ipc.IPC.QueueCallTime_mean
+ipc.IPC.QueueCallTime_median
+ipc.IPC.QueueCallTime_min
+regionserver.Server.Append_75th_percentile
+regionserver.Server.Append_95th_percentile
+ipc.IPC.QueueCallTime_num_ops
+ipc.IPC.authenticationFailures
+regionserver.Server.Append_99th_percentile
+regionserver.Server.Append_max
+ipc.IPC.authenticationSuccesses
+regionserver.Server.Append_mean
+regionserver.Server.Append_median
+regionserver.Server.Append_min
+regionserver.Server.Append_num_ops
+regionserver.Server.Delete_75th_percentile
+regionserver.Server.Delete_95th_percentile
+ipc.IPC.authorizationFailures
+regionserver.Server.Delete_99th_percentile
+regionserver.Server.Delete_max
+regionserver.Server.Delete_mean
+regionserver.Server.Delete_median
+regionserver.Server.Delete_min
+regionserver.Server.Delete_num_ops
+ipc.IPC.authorizationSuccesses
+ipc.IPC.numActiveHandler
+ipc.IPC.numCallsInGeneralQueue
+regionserver.Server.Get_75th_percentile
+regionserver.Server.Get_95th_percentile
+regionserver.Server.Get_99th_percentile
+regionserver.Server.Get_max
+regionserver.Server.Get_mean
+regionserver.Server.Get_median
+ipc.IPC.numCallsInPriorityQueue
+regionserver.Server.Get_min
+regionserver.Server.Get_num_ops
+regionserver.Server.Increment_75th_percentile
+regionserver.Server.Increment_95th_percentile
+regionserver.Server.Increment_99th_percentile
+regionserver.Server.Increment_max
+regionserver.Server.Increment_mean
+regionserver.Server.Increment_median
+ipc.IPC.numCallsInReplicationQueue
+ipc.IPC.numOpenConnections
+regionserver.Server.Increment_min
+regionserver.Server.Increment_num_ops
+ipc.IPC.queueSize
+regionserver.Server.Mutate_75th_percentile
+regionserver.Server.Mutate_95th_percentile
+regionserver.Server.Mutate_99th_percentile
+regionserver.Server.Mutate_max
+regionserver.Server.Mutate_mean
+regionserver.Server.Mutate_median
+ipc.IPC.receivedBytes
+regionserver.Server.Mutate_min
+regionserver.Server.Mutate_num_ops
+regionserver.Server.Replay_75th_percentile
+regionserver.Server.Replay_95th_percentile
+regionserver.Server.Replay_99th_percentile
+regionserver.Server.Replay_max
+regionserver.Server.Replay_mean
+regionserver.Server.Replay_median
+ipc.IPC.sentBytes
+jvm.JvmMetrics.GcCount
+regionserver.Server.Replay_min
+regionserver.Server.Replay_num_ops
+regionserver.Server.blockCacheCount
+regionserver.Server.blockCacheEvictionCount
+regionserver.Server.blockCacheExpressHitPercent
+regionserver.Server.blockCacheFreeSize
+regionserver.Server.blockCacheHitCount
+regionserver.Server.blockCacheMissCount
+regionserver.Server.blockCacheSize
+regionserver.Server.blockCountHitPercent
+regionserver.Server.checkMutateFailedCount
+regionserver.Server.checkMutatePassedCount
+regionserver.Server.compactionQueueLength
+regionserver.Server.flushQueueLength
+jvm.JvmMetrics.GcCountConcurrentMarkSweep
+regionserver.Server.hlogFileCount
+regionserver.Server.hlogFileSize
+regionserver.Server.memStoreSize
+regionserver.Server.mutationsWithoutWALCount
+regionserver.Server.mutationsWithoutWALSize
+regionserver.Server.percentFilesLocal
+regionserver.Server.readRequestCount
+regionserver.Server.regionCount
+regionserver.Server.regionServerStartTime
+regionserver.Server.slowAppendCount
+regionserver.Server.slowDeleteCount
+regionserver.Server.slowGetCount
+regionserver.Server.slowIncrementCount
+regionserver.Server.slowPutCount
+regionserver.Server.staticBloomSize
+regionserver.Server.staticIndexSize
+regionserver.Server.storeCount
+regionserver.Server.storeFileCount
+regionserver.Server.storeFileIndexSize
+regionserver.Server.storeFileSize
+regionserver.Server.totalRequestCount
+regionserver.Server.updatesBlockedTime
+regionserver.Server.writeRequestCount
+regionserver.WAL.AppendSize_75th_percentile
+regionserver.WAL.AppendSize_95th_percentile
+regionserver.WAL.AppendSize_99th_percentile
+regionserver.WAL.AppendSize_max
+regionserver.WAL.AppendSize_mean
+regionserver.WAL.AppendSize_median
+regionserver.WAL.SyncTime_median
+jvm.JvmMetrics.GcCountParNew
+regionserver.WAL.AppendSize_min
+regionserver.WAL.AppendSize_num_ops
+regionserver.WAL.SyncTime_max
+regionserver.WAL.AppendTime_75th_percentile
+regionserver.WAL.AppendTime_95th_percentile
+regionserver.WAL.AppendTime_99th_percentile
+regionserver.WAL.AppendTime_max
+regionserver.WAL.SyncTime_95th_percentile
+regionserver.WAL.AppendTime_mean
+regionserver.WAL.AppendTime_median
+regionserver.WAL.AppendTime_min
+regionserver.WAL.AppendTime_num_ops
+regionserver.WAL.SyncTime_75th_percentile
+regionserver.WAL.SyncTime_99th_percentile
+regionserver.WAL.SyncTime_mean

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/FLUME.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/FLUME.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/FLUME.txt
new file mode 100755
index 0000000..b3bfec3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/FLUME.txt
@@ -0,0 +1,17 @@
+BatchCompleteCount
+BatchUnderflowCount
+EventTakeSuccessCount
+ConnectionClosedCount
+ConnectionCreatedCount
+ChannelCapacity
+ConnectionFailedCount
+EventDrainAttemptCount
+ChannelFillPercentage
+EventDrainSuccessCount
+BatchEmptyCount
+EventPutAttemptCount
+ChannelSize
+EventPutSuccessCount
+StartTime
+StopTime
+EventTakeAttemptCount

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/HBASE.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/HBASE.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/HBASE.txt
new file mode 100755
index 0000000..0067403
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/HBASE.txt
@@ -0,0 +1,588 @@
+regionserver.WAL.SyncTime_min
+regionserver.WAL.SyncTime_num_ops
+regionserver.WAL.appendCount
+regionserver.WAL.slowAppendCount
+jvm.JvmMetrics.GcTimeMillis
+jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep
+jvm.JvmMetrics.GcTimeMillisParNew
+ugi.UgiMetrics.GetGroupsAvgTime
+ugi.UgiMetrics.GetGroupsNumOps
+ugi.UgiMetrics.LoginFailureNumOps
+ugi.UgiMetrics.LoginSuccessAvgTime
+ugi.UgiMetrics.LoginSuccessNumOps
+ugi.UgiMetrics.LoginFailureAvgTime
+jvm.JvmMetrics.LogError
+jvm.JvmMetrics.LogFatal
+jvm.JvmMetrics.LogInfo
+jvm.JvmMetrics.LogWarn
+jvm.JvmMetrics.MemHeapCommittedM
+jvm.JvmMetrics.MemHeapMaxM
+jvm.JvmMetrics.MemHeapUsedM
+jvm.JvmMetrics.MemMaxM
+jvm.JvmMetrics.MemNonHeapCommittedM
+jvm.JvmMetrics.MemNonHeapMaxM
+jvm.JvmMetrics.MemNonHeapUsedM
+jvm.JvmMetrics.ThreadsBlocked
+jvm.JvmMetrics.ThreadsNew
+jvm.JvmMetrics.ThreadsRunnable
+jvm.JvmMetrics.ThreadsTerminated
+jvm.JvmMetrics.ThreadsTimedWaiting
+master.AssignmentManger.Assign_75th_percentile
+master.AssignmentManger.Assign_95th_percentile
+master.AssignmentManger.Assign_99th_percentile
+master.AssignmentManger.Assign_max
+master.AssignmentManger.Assign_mean
+master.AssignmentManger.Assign_median
+master.AssignmentManger.Assign_min
+jvm.JvmMetrics.ThreadsWaiting
+master.AssignmentManger.Assign_num_ops
+master.AssignmentManger.BulkAssign_75th_percentile
+master.AssignmentManger.BulkAssign_95th_percentile
+master.AssignmentManger.BulkAssign_99th_percentile
+master.AssignmentManger.BulkAssign_max
+master.AssignmentManger.BulkAssign_mean
+master.AssignmentManger.BulkAssign_median
+master.AssignmentManger.BulkAssign_min
+master.AssignmentManger.BulkAssign_num_ops
+master.AssignmentManger.ritCount
+master.AssignmentManger.ritCountOverThreshold
+master.AssignmentManger.ritOldestAge
+master.Balancer.BalancerCluster_75th_percentile
+master.Balancer.BalancerCluster_95th_percentile
+master.Balancer.BalancerCluster_99th_percentile
+master.Balancer.BalancerCluster_max
+master.Balancer.BalancerCluster_mean
+master.Balancer.BalancerCluster_median
+master.Balancer.BalancerCluster_min
+master.Balancer.BalancerCluster_num_ops
+master.Balancer.miscInvocationCount
+master.FileSystem.HlogSplitSize_75th_percentile
+master.FileSystem.HlogSplitSize_95th_percentile
+master.FileSystem.HlogSplitSize_99th_percentile
+master.FileSystem.HlogSplitSize_max
+master.FileSystem.HlogSplitSize_mean
+master.FileSystem.HlogSplitSize_median
+master.FileSystem.HlogSplitSize_min
+master.FileSystem.HlogSplitSize_num_ops
+master.FileSystem.HlogSplitTime_75th_percentile
+master.FileSystem.HlogSplitTime_95th_percentile
+master.FileSystem.HlogSplitTime_99th_percentile
+master.FileSystem.HlogSplitTime_max
+master.FileSystem.HlogSplitTime_mean
+master.FileSystem.HlogSplitTime_median
+master.FileSystem.HlogSplitTime_min
+master.FileSystem.HlogSplitTime_num_ops
+master.FileSystem.MetaHlogSplitSize_75th_percentile
+master.FileSystem.MetaHlogSplitSize_95th_percentile
+master.FileSystem.MetaHlogSplitSize_99th_percentile
+master.FileSystem.MetaHlogSplitSize_max
+master.FileSystem.MetaHlogSplitSize_mean
+master.FileSystem.MetaHlogSplitSize_median
+master.FileSystem.MetaHlogSplitSize_min
+master.FileSystem.MetaHlogSplitSize_num_ops
+master.FileSystem.MetaHlogSplitTime_75th_percentile
+master.FileSystem.MetaHlogSplitTime_95th_percentile
+master.FileSystem.MetaHlogSplitTime_99th_percentile
+master.FileSystem.MetaHlogSplitTime_max
+master.FileSystem.MetaHlogSplitTime_mean
+master.FileSystem.MetaHlogSplitTime_median
+master.FileSystem.MetaHlogSplitTime_min
+master.FileSystem.MetaHlogSplitTime_num_ops
+master.Server.averageLoad
+master.Server.clusterRequests
+master.Server.masterActiveTime
+master.Server.masterStartTime
+master.Server.numDeadRegionServers
+master.Server.numRegionServers
+metricssystem.MetricsSystem.DroppedPubAll
+metricssystem.MetricsSystem.NumActiveSinks
+ipc.IPC.ProcessCallTime_75th_percentile
+ipc.IPC.ProcessCallTime_95th_percentile
+metricssystem.MetricsSystem.NumActiveSources
+metricssystem.MetricsSystem.NumAllSinks
+ipc.IPC.ProcessCallTime_99th_percentile
+metricssystem.MetricsSystem.NumAllSources
+metricssystem.MetricsSystem.PublishAvgTime
+metricssystem.MetricsSystem.PublishNumOps
+ipc.IPC.ProcessCallTime_max
+ipc.IPC.ProcessCallTime_mean
+metricssystem.MetricsSystem.Sink_timelineAvgTime
+ipc.IPC.ProcessCallTime_median
+metricssystem.MetricsSystem.Sink_timelineDropped
+metricssystem.MetricsSystem.Sink_timelineNumOps
+ipc.IPC.ProcessCallTime_num_ops
+metricssystem.MetricsSystem.Sink_timelineQsize
+metricssystem.MetricsSystem.SnapshotAvgTime
+ipc.IPC.QueueCallTime_95th_percentile
+metricssystem.MetricsSystem.SnapshotNumOps
+ipc.IPC.ProcessCallTime_min
+ipc.IPC.QueueCallTime_75th_percentile
+ipc.IPC.QueueCallTime_99th_percentile
+ipc.IPC.QueueCallTime_max
+ipc.IPC.QueueCallTime_mean
+ipc.IPC.QueueCallTime_median
+ipc.IPC.QueueCallTime_min
+regionserver.Server.Append_75th_percentile
+regionserver.Server.Append_95th_percentile
+ipc.IPC.QueueCallTime_num_ops
+ipc.IPC.authenticationFailures
+regionserver.Server.Append_99th_percentile
+regionserver.Server.Append_max
+ipc.IPC.authenticationSuccesses
+regionserver.Server.Append_mean
+regionserver.Server.Append_median
+regionserver.Server.Append_min
+regionserver.Server.Append_num_ops
+regionserver.Server.Delete_75th_percentile
+regionserver.Server.Delete_95th_percentile
+ipc.IPC.authorizationFailures
+regionserver.Server.Delete_99th_percentile
+regionserver.Server.Delete_max
+regionserver.Server.Delete_mean
+regionserver.Server.Delete_median
+regionserver.Server.Delete_min
+regionserver.Server.Delete_num_ops
+ipc.IPC.authorizationSuccesses
+ipc.IPC.numActiveHandler
+ipc.IPC.numCallsInGeneralQueue
+regionserver.Server.Get_75th_percentile
+regionserver.Server.Get_95th_percentile
+regionserver.Server.Get_99th_percentile
+regionserver.Server.Get_max
+regionserver.Server.Get_mean
+regionserver.Server.Get_median
+ipc.IPC.numCallsInPriorityQueue
+regionserver.Server.Get_min
+regionserver.Server.Get_num_ops
+regionserver.Server.Increment_75th_percentile
+regionserver.Server.Increment_95th_percentile
+regionserver.Server.Increment_99th_percentile
+regionserver.Server.Increment_max
+regionserver.Server.Increment_mean
+regionserver.Server.Increment_median
+ipc.IPC.numCallsInReplicationQueue
+ipc.IPC.numOpenConnections
+regionserver.Server.Increment_min
+regionserver.Server.Increment_num_ops
+ipc.IPC.queueSize
+regionserver.Server.Mutate_75th_percentile
+regionserver.Server.Mutate_95th_percentile
+regionserver.Server.Mutate_99th_percentile
+regionserver.Server.Mutate_max
+regionserver.Server.Mutate_mean
+regionserver.Server.Mutate_median
+ipc.IPC.receivedBytes
+regionserver.Server.Mutate_min
+regionserver.Server.Mutate_num_ops
+regionserver.Server.Replay_75th_percentile
+regionserver.Server.Replay_95th_percentile
+regionserver.Server.Replay_99th_percentile
+regionserver.Server.Replay_max
+regionserver.Server.Replay_mean
+regionserver.Server.Replay_median
+ipc.IPC.sentBytes
+jvm.JvmMetrics.GcCount
+regionserver.Server.Replay_min
+regionserver.Server.Replay_num_ops
+regionserver.Server.blockCacheCount
+regionserver.Server.blockCacheEvictionCount
+regionserver.Server.blockCacheExpressHitPercent
+regionserver.Server.blockCacheFreeSize
+regionserver.Server.blockCacheHitCount
+regionserver.Server.blockCacheMissCount
+regionserver.Server.blockCacheSize
+regionserver.Server.blockCountHitPercent
+regionserver.Server.checkMutateFailedCount
+regionserver.Server.checkMutatePassedCount
+regionserver.Server.compactionQueueLength
+regionserver.Server.flushQueueLength
+jvm.JvmMetrics.GcCountConcurrentMarkSweep
+regionserver.Server.hlogFileCount
+regionserver.Server.hlogFileSize
+regionserver.Server.memStoreSize
+regionserver.Server.mutationsWithoutWALCount
+regionserver.Server.mutationsWithoutWALSize
+regionserver.Server.percentFilesLocal
+regionserver.Server.readRequestCount
+regionserver.Server.regionCount
+regionserver.Server.regionServerStartTime
+regionserver.Server.slowAppendCount
+regionserver.Server.slowDeleteCount
+regionserver.Server.slowGetCount
+regionserver.Server.slowIncrementCount
+regionserver.Server.slowPutCount
+regionserver.Server.staticBloomSize
+regionserver.Server.staticIndexSize
+regionserver.Server.storeCount
+regionserver.Server.storeFileCount
+regionserver.Server.storeFileIndexSize
+regionserver.Server.storeFileSize
+regionserver.Server.totalRequestCount
+regionserver.Server.updatesBlockedTime
+regionserver.Server.writeRequestCount
+regionserver.WAL.AppendSize_75th_percentile
+regionserver.WAL.AppendSize_95th_percentile
+regionserver.WAL.AppendSize_99th_percentile
+regionserver.WAL.AppendSize_max
+regionserver.WAL.AppendSize_mean
+regionserver.WAL.AppendSize_median
+regionserver.WAL.SyncTime_median
+jvm.JvmMetrics.GcCountParNew
+regionserver.WAL.AppendSize_min
+regionserver.WAL.AppendSize_num_ops
+regionserver.WAL.SyncTime_max
+regionserver.WAL.AppendTime_75th_percentile
+regionserver.WAL.AppendTime_95th_percentile
+regionserver.WAL.AppendTime_99th_percentile
+regionserver.WAL.AppendTime_max
+regionserver.WAL.SyncTime_95th_percentile
+regionserver.WAL.AppendTime_mean
+regionserver.WAL.AppendTime_median
+regionserver.WAL.AppendTime_min
+regionserver.WAL.AppendTime_num_ops
+regionserver.WAL.SyncTime_75th_percentile
+regionserver.WAL.SyncTime_99th_percentile
+regionserver.WAL.SyncTime_mean
+regionserver.WAL.SyncTime_median
+regionserver.WAL.SyncTime_min
+regionserver.WAL.SyncTime_num_ops
+regionserver.WAL.appendCount
+regionserver.Server.majorCompactedCellsSize
+regionserver.WAL.rollRequest
+regionserver.WAL.AppendTime_99th_percentile
+regionserver.WAL.slowAppendCount
+regionserver.WAL.AppendTime_num_ops
+regionserver.WAL.SyncTime_95th_percentile
+regionserver.Server.Mutate_median
+regionserver.WAL.AppendTime_75th_percentile
+regionserver.WAL.AppendSize_num_ops
+regionserver.Server.Mutate_max
+regionserver.WAL.AppendSize_min
+regionserver.WAL.AppendTime_min
+regionserver.WAL.SyncTime_99th_percentile
+regionserver.Server.Mutate_95th_percentile
+regionserver.WAL.AppendSize_mean
+regionserver.WAL.SyncTime_mean
+regionserver.WAL.AppendSize_99th_percentile
+jvm.JvmMetrics.GcTimeMillis
+regionserver.WAL.AppendSize_75th_percentile
+jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep
+regionserver.WAL.SyncTime_max
+regionserver.Server.Increment_median
+regionserver.Server.updatesBlockedTime
+regionserver.Server.Increment_max
+ugi.UgiMetrics.GetGroupsAvgTime
+regionserver.WAL.lowReplicaRollRequest
+ugi.UgiMetrics.GetGroupsNumOps
+regionserver.Server.storeFileSize
+regionserver.Server.Increment_95th_percentile
+jvm.JvmMetrics.GcTimeMillisParNew
+ugi.UgiMetrics.LoginFailureAvgTime
+ugi.UgiMetrics.LoginFailureNumOps
+regionserver.Server.storeFileCount
+ugi.UgiMetrics.LoginSuccessNumOps
+regionserver.Server.staticIndexSize
+jvm.JvmMetrics.LogError
+regionserver.Server.splitQueueLength
+regionserver.Server.Get_median
+regionserver.Server.slowPutCount
+regionserver.Server.Get_max
+jvm.JvmMetrics.LogFatal
+regionserver.Server.slowGetCount
+jvm.JvmMetrics.LogInfo
+regionserver.Server.slowAppendCount
+regionserver.Server.Get_95th_percentile
+jvm.JvmMetrics.LogWarn
+regionserver.Server.regionCount
+regionserver.Server.FlushTime_num_ops
+regionserver.Server.FlushTime_min
+regionserver.Server.readRequestCount
+jvm.JvmMetrics.MemHeapCommittedM
+regionserver.Server.percentFilesLocalSecondaryRegions
+regionserver.Server.percentFilesLocal
+regionserver.Server.FlushTime_max
+regionserver.Server.FlushTime_99th_percentile
+regionserver.Server.FlushTime_95th_percentile
+regionserver.Server.Delete_num_ops
+jvm.JvmMetrics.MemHeapMaxM
+regionserver.Server.mutationsWithoutWALCount
+jvm.JvmMetrics.MemHeapUsedM
+regionserver.Server.Delete_median
+regionserver.Server.ScanNext_max
+regionserver.Server.ScanNext_99th_percentile
+regionserver.Server.majorCompactedCellsCount
+regionserver.Server.hlogFileSize
+regionserver.Server.flushedCellsCount
+jvm.JvmMetrics.MemMaxM
+regionserver.Server.hlogFileCount
+regionserver.Server.Delete_95th_percentile
+jvm.JvmMetrics.MemNonHeapCommittedM
+jvm.JvmMetrics.MemNonHeapMaxM
+jvm.JvmMetrics.MemNonHeapUsedM
+regionserver.Server.Append_num_ops
+regionserver.Server.flushQueueLength
+jvm.JvmMetrics.ThreadsBlocked
+regionserver.Server.Append_median
+jvm.JvmMetrics.ThreadsNew
+regionserver.Server.checkMutatePassedCount
+regionserver.Server.compactedCellsSize
+jvm.JvmMetrics.ThreadsRunnable
+jvm.JvmMetrics.ThreadsTerminated
+jvm.JvmMetrics.ThreadsTimedWaiting
+master.AssignmentManger.Assign_75th_percentile
+master.AssignmentManger.Assign_95th_percentile
+master.AssignmentManger.Assign_99th_percentile
+master.AssignmentManger.Assign_max
+regionserver.Server.Append_95th_percentile
+master.AssignmentManger.Assign_mean
+master.AssignmentManger.Assign_median
+regionserver.Replication.sink.appliedOps
+regionserver.Replication.sink.appliedBatches
+regionserver.Replication.sink.ageOfLastAppliedOp
+regionserver.WAL.SyncTime_75th_percentile
+regionserver.RegionServer.receivedBytes
+regionserver.RegionServer.queueSize
+regionserver.RegionServer.numOpenConnections
+regionserver.RegionServer.numCallsInPriorityQueue
+regionserver.Server.Replay_num_ops
+master.AssignmentManger.Assign_min
+master.AssignmentManger.Assign_num_ops
+regionserver.Server.checkMutateFailedCount
+regionserver.RegionServer.exceptions.RegionTooBusyException
+regionserver.RegionServer.exceptions.RegionMovedException
+regionserver.RegionServer.exceptions.OutOfOrderScannerNextException
+master.AssignmentManger.BulkAssign_75th_percentile
+master.AssignmentManger.BulkAssign_95th_percentile
+regionserver.RegionServer.exceptions.FailedSanityCheckException
+regionserver.RegionServer.exceptions
+regionserver.RegionServer.authorizationSuccesses
+regionserver.RegionServer.authenticationSuccesses
+regionserver.RegionServer.authenticationFailures
+regionserver.RegionServer.TotalCallTime_num_ops
+master.AssignmentManger.BulkAssign_99th_percentile
+jvm.JvmMetrics.ThreadsWaiting
+regionserver.RegionServer.TotalCallTime_median
+regionserver.RegionServer.TotalCallTime_mean
+master.AssignmentManger.BulkAssign_max
+regionserver.RegionServer.TotalCallTime_95th_percentile
+regionserver.RegionServer.TotalCallTime_75th_percentile
+regionserver.RegionServer.QueueCallTime_num_ops
+master.AssignmentManger.BulkAssign_mean
+master.AssignmentManger.BulkAssign_median
+regionserver.RegionServer.QueueCallTime_median
+regionserver.RegionServer.QueueCallTime_mean
+regionserver.RegionServer.QueueCallTime_max
+regionserver.RegionServer.QueueCallTime_95th_percentile
+regionserver.RegionServer.QueueCallTime_75th_percentile
+regionserver.RegionServer.ProcessCallTime_num_ops
+regionserver.RegionServer.ProcessCallTime_median
+regionserver.RegionServer.ProcessCallTime_mean
+regionserver.Server.ScanNext_num_ops
+master.AssignmentManger.BulkAssign_num_ops
+master.AssignmentManger.BulkAssign_min
+regionserver.RegionServer.ProcessCallTime_95th_percentile
+master.AssignmentManger.ritCount
+master.AssignmentManger.ritCountOverThreshold
+master.AssignmentManger.ritOldestAge
+master.Balancer.BalancerCluster_75th_percentile
+master.Balancer.BalancerCluster_95th_percentile
+master.Balancer.BalancerCluster_99th_percentile
+ugi.UgiMetrics.LoginSuccessAvgTime
+master.Balancer.BalancerCluster_max
+master.Balancer.BalancerCluster_mean
+master.Balancer.BalancerCluster_median
+master.Balancer.BalancerCluster_min
+regionserver.Server.ScanNext_median
+master.Balancer.BalancerCluster_num_ops
+master.Balancer.miscInvocationCount
+master.FileSystem.HlogSplitSize_75th_percentile
+master.FileSystem.HlogSplitSize_95th_percentile
+master.FileSystem.HlogSplitSize_max
+master.FileSystem.HlogSplitSize_99th_percentile
+master.FileSystem.HlogSplitSize_mean
+master.FileSystem.HlogSplitSize_median
+master.FileSystem.HlogSplitSize_min
+master.FileSystem.HlogSplitSize_num_ops
+master.FileSystem.HlogSplitTime_75th_percentile
+master.FileSystem.HlogSplitTime_95th_percentile
+regionserver.Server.SplitTime_median
+master.FileSystem.HlogSplitTime_max
+master.FileSystem.HlogSplitTime_99th_percentile
+master.FileSystem.HlogSplitTime_mean
+master.FileSystem.HlogSplitTime_median
+master.FileSystem.HlogSplitTime_min
+master.FileSystem.HlogSplitTime_num_ops
+master.FileSystem.MetaHlogSplitSize_75th_percentile
+master.FileSystem.MetaHlogSplitSize_95th_percentile
+master.FileSystem.MetaHlogSplitSize_max
+master.FileSystem.MetaHlogSplitSize_99th_percentile
+master.FileSystem.MetaHlogSplitSize_mean
+master.FileSystem.MetaHlogSplitSize_median
+master.FileSystem.MetaHlogSplitSize_min
+master.FileSystem.MetaHlogSplitSize_num_ops
+master.FileSystem.MetaHlogSplitTime_75th_percentile
+master.FileSystem.MetaHlogSplitTime_95th_percentile
+master.FileSystem.MetaHlogSplitTime_max
+master.FileSystem.MetaHlogSplitTime_99th_percentile
+master.FileSystem.MetaHlogSplitTime_mean
+master.FileSystem.MetaHlogSplitTime_median
+master.FileSystem.MetaHlogSplitTime_min
+master.FileSystem.MetaHlogSplitTime_num_ops
+master.Master.ProcessCallTime_75th_percentile
+master.Master.ProcessCallTime_95th_percentile
+master.Master.ProcessCallTime_99th_percentile
+master.Master.ProcessCallTime_max
+master.Master.ProcessCallTime_mean
+master.Master.ProcessCallTime_median
+master.Master.ProcessCallTime_min
+master.Master.ProcessCallTime_num_ops
+master.Master.QueueCallTime_75th_percentile
+master.Master.QueueCallTime_95th_percentile
+master.Master.QueueCallTime_99th_percentile
+master.Master.QueueCallTime_max
+master.Master.QueueCallTime_mean
+regionserver.Server.blockCacheCountHitPercent
+master.Master.QueueCallTime_median
+master.Master.QueueCallTime_min
+master.Master.QueueCallTime_num_ops
+master.Master.TotalCallTime_75th_percentile
+master.Master.TotalCallTime_95th_percentile
+master.Master.TotalCallTime_99th_percentile
+master.Master.TotalCallTime_max
+master.Master.TotalCallTime_mean
+master.Master.TotalCallTime_median
+master.Master.TotalCallTime_min
+master.Master.TotalCallTime_num_ops
+master.Master.authenticationFailures
+master.Master.authenticationSuccesses
+master.Master.authorizationFailures
+master.Master.authorizationSuccesses
+master.Master.exceptions
+master.Master.exceptions.FailedSanityCheckException
+master.Master.exceptions.NotServingRegionException
+master.Master.exceptions.OutOfOrderScannerNextException
+master.Master.exceptions.RegionMovedException
+master.Master.exceptions.RegionTooBusyException
+master.Master.exceptions.UnknownScannerException
+master.Master.numActiveHandler
+master.Master.numCallsInGeneralQueue
+master.Master.numCallsInPriorityQueue
+master.Master.numCallsInReplicationQueue
+regionserver.Server.blockCacheSize
+master.Master.numOpenConnections
+master.Master.queueSize
+master.Master.receivedBytes
+master.Master.sentBytes
+master.Server.averageLoad
+master.Server.clusterRequests
+master.Server.masterActiveTime
+master.Server.numDeadRegionServers
+master.Server.masterStartTime
+master.Server.numRegionServers
+metricssystem.MetricsSystem.DroppedPubAll
+regionserver.Server.SplitTime_min
+regionserver.Server.blockCacheHitCount
+metricssystem.MetricsSystem.NumActiveSinks
+metricssystem.MetricsSystem.NumActiveSources
+metricssystem.MetricsSystem.NumAllSinks
+metricssystem.MetricsSystem.NumAllSources
+regionserver.Server.blockCacheExpressHitPercent
+metricssystem.MetricsSystem.PublishAvgTime
+metricssystem.MetricsSystem.PublishNumOps
+metricssystem.MetricsSystem.Sink_timelineAvgTime
+regionserver.Server.SplitTime_num_ops
+metricssystem.MetricsSystem.Sink_timelineDropped
+metricssystem.MetricsSystem.Sink_timelineNumOps
+regionserver.Server.SplitTime_max
+regionserver.Server.ScanNext_min
+metricssystem.MetricsSystem.Sink_timelineQsize
+metricssystem.MetricsSystem.SnapshotAvgTime
+metricssystem.MetricsSystem.SnapshotNumOps
+regionserver.Server.SplitTime_95th_percentile
+regionserver.Server.SplitTime_99th_percentile
+regionserver.RegionServer.ProcessCallTime_75th_percentile
+regionserver.RegionServer.ProcessCallTime_99th_percentile
+regionserver.RegionServer.ProcessCallTime_max
+regionserver.RegionServer.ProcessCallTime_min
+regionserver.RegionServer.QueueCallTime_99th_percentile
+regionserver.RegionServer.QueueCallTime_min
+regionserver.RegionServer.TotalCallTime_99th_percentile
+regionserver.RegionServer.TotalCallTime_max
+regionserver.RegionServer.TotalCallTime_min
+regionserver.RegionServer.authorizationFailures
+regionserver.RegionServer.exceptions.NotServingRegionException
+regionserver.RegionServer.exceptions.UnknownScannerException
+regionserver.RegionServer.numActiveHandler
+regionserver.RegionServer.numCallsInGeneralQueue
+regionserver.Server.ScanNext_95th_percentile
+regionserver.RegionServer.numCallsInReplicationQueue
+regionserver.RegionServer.sentBytes
+regionserver.Server.Append_75th_percentile
+regionserver.Server.Append_99th_percentile
+regionserver.Server.Append_max
+regionserver.Server.Append_mean
+regionserver.Server.Append_min
+regionserver.Server.Delete_75th_percentile
+regionserver.Server.Delete_99th_percentile
+regionserver.Server.Delete_max
+regionserver.Server.Delete_mean
+regionserver.Server.Delete_min
+regionserver.Server.FlushTime_75th_percentile
+regionserver.Server.FlushTime_mean
+regionserver.Server.FlushTime_median
+regionserver.Server.Get_75th_percentile
+regionserver.Server.Get_99th_percentile
+regionserver.Server.Get_mean
+regionserver.Server.Get_min
+regionserver.Server.Get_num_ops
+regionserver.Server.Increment_75th_percentile
+regionserver.Server.Increment_99th_percentile
+regionserver.Server.Increment_mean
+regionserver.Server.Increment_min
+regionserver.Server.Increment_num_ops
+regionserver.Server.Mutate_75th_percentile
+regionserver.Server.Mutate_99th_percentile
+regionserver.Server.Mutate_mean
+regionserver.Server.Mutate_min
+regionserver.Server.Mutate_num_ops
+regionserver.Server.Replay_75th_percentile
+regionserver.Server.Replay_99th_percentile
+regionserver.Server.Replay_mean
+regionserver.Server.Replay_min
+regionserver.Server.ScanNext_75th_percentile
+regionserver.Server.ScanNext_mean
+regionserver.Server.SplitTime_75th_percentile
+jvm.JvmMetrics.GcCount
+regionserver.Server.SplitTime_mean
+regionserver.Server.Replay_max
+regionserver.Server.blockCacheCount
+regionserver.Server.blockCacheEvictionCount
+regionserver.Server.blockCacheFreeSize
+regionserver.Server.blockCacheMissCount
+regionserver.Server.Replay_median
+regionserver.Server.blockedRequestCount
+regionserver.Server.compactedCellsCount
+regionserver.Server.compactionQueueLength
+regionserver.Server.flushedCellsSize
+regionserver.Server.memStoreSize
+regionserver.Server.mutationsWithoutWALSize
+jvm.JvmMetrics.GcCountConcurrentMarkSweep
+regionserver.Server.regionServerStartTime
+regionserver.Server.slowDeleteCount
+regionserver.Server.slowIncrementCount
+regionserver.Server.splitRequestCount
+regionserver.Server.splitSuccessCount
+regionserver.Server.staticBloomSize
+regionserver.Server.storeCount
+regionserver.Server.storeFileIndexSize
+regionserver.Server.totalRequestCount
+regionserver.Server.writeRequestCount
+regionserver.WAL.AppendSize_95th_percentile
+regionserver.WAL.AppendSize_max
+regionserver.WAL.AppendSize_median
+regionserver.Server.Replay_95th_percentile
+regionserver.WAL.AppendTime_95th_percentile
+regionserver.WAL.AppendTime_median
+regionserver.WAL.AppendTime_max
+jvm.JvmMetrics.GcCountParNew
+regionserver.WAL.AppendTime_mean

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/HDFS.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/HDFS.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/HDFS.txt
new file mode 100755
index 0000000..84576e9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/HDFS.txt
@@ -0,0 +1,277 @@
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.CacheCapacity
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.CacheUsed
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Capacity
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.DfsUsed
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.EstimatedCapacityLostTotal
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.LastVolumeFailureDate
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumBlocksCached
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumBlocksFailedToCache
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumBlocksFailedToUnCache
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.NumFailedVolumes
+FSDatasetState.org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.Remaining
+default.StartupProgress.ElapsedTime
+default.StartupProgress.LoadingEditsCount
+default.StartupProgress.LoadingEditsElapsedTime
+default.StartupProgress.LoadingEditsPercentComplete
+default.StartupProgress.LoadingEditsTotal
+default.StartupProgress.LoadingFsImageCount
+default.StartupProgress.LoadingFsImageElapsedTime
+default.StartupProgress.LoadingFsImagePercentComplete
+default.StartupProgress.LoadingFsImageTotal
+default.StartupProgress.PercentComplete
+default.StartupProgress.SafeModeCount
+default.StartupProgress.SafeModeElapsedTime
+default.StartupProgress.SafeModePercentComplete
+default.StartupProgress.SafeModeTotal
+default.StartupProgress.SavingCheckpointCount
+default.StartupProgress.SavingCheckpointElapsedTime
+default.StartupProgress.SavingCheckpointPercentComplete
+default.StartupProgress.SavingCheckpointTotal
+dfs.FSNamesystem.BlockCapacity
+dfs.FSNamesystem.BlocksTotal
+dfs.FSNamesystem.CapacityRemaining
+dfs.FSNamesystem.CapacityRemainingGB
+dfs.FSNamesystem.CapacityTotal
+dfs.FSNamesystem.CapacityTotalGB
+dfs.FSNamesystem.CapacityUsed
+dfs.FSNamesystem.CapacityUsedGB
+dfs.FSNamesystem.CapacityUsedNonDFS
+dfs.FSNamesystem.CorruptBlocks
+dfs.FSNamesystem.ExcessBlocks
+dfs.FSNamesystem.ExpiredHeartbeats
+dfs.FSNamesystem.FilesTotal
+dfs.FSNamesystem.LastCheckpointTime
+dfs.FSNamesystem.LastWrittenTransactionId
+dfs.FSNamesystem.LockQueueLength
+dfs.FSNamesystem.MillisSinceLastLoadedEdits
+dfs.FSNamesystem.MissingBlocks
+dfs.FSNamesystem.MissingReplOneBlocks
+dfs.FSNamesystem.NumActiveClients
+dfs.FSNamesystem.NumFilesUnderConstruction
+dfs.FSNamesystem.PendingDataNodeMessageCount
+dfs.FSNamesystem.PendingDeletionBlocks
+dfs.FSNamesystem.PendingReplicationBlocks
+dfs.FSNamesystem.PostponedMisreplicatedBlocks
+dfs.FSNamesystem.ScheduledReplicationBlocks
+dfs.FSNamesystem.Snapshots
+dfs.FSNamesystem.SnapshottableDirectories
+dfs.FSNamesystem.StaleDataNodes
+dfs.FSNamesystem.TotalFiles
+dfs.FSNamesystem.TotalLoad
+dfs.FSNamesystem.TotalSyncCount
+dfs.FSNamesystem.TransactionsSinceLastCheckpoint
+dfs.FSNamesystem.TransactionsSinceLastLogRoll
+dfs.FSNamesystem.UnderReplicatedBlocks
+dfs.datanode.BlockChecksumOpAvgTime
+dfs.datanode.BlockChecksumOpNumOps
+dfs.datanode.BlockReportsAvgTime
+dfs.datanode.BlockReportsNumOps
+dfs.datanode.BlockVerificationFailures
+dfs.datanode.BlocksCached
+dfs.datanode.BlocksGetLocalPathInfo
+dfs.datanode.BlocksRead
+dfs.datanode.BlocksRemoved
+dfs.datanode.BlocksReplicated
+dfs.datanode.BlocksUncached
+dfs.datanode.BlocksVerified
+dfs.datanode.BlocksWritten
+dfs.datanode.BytesRead
+dfs.datanode.BytesWritten
+dfs.datanode.CacheReportsAvgTime
+dfs.datanode.CacheReportsNumOps
+dfs.datanode.CopyBlockOpAvgTime
+dfs.datanode.CopyBlockOpNumOps
+dfs.datanode.DatanodeNetworkErrors
+dfs.datanode.FlushNanosAvgTime
+dfs.datanode.FlushNanosNumOps
+dfs.datanode.FsyncCount
+dfs.datanode.FsyncNanosAvgTime
+dfs.datanode.FsyncNanosNumOps
+dfs.datanode.HeartbeatsAvgTime
+dfs.datanode.HeartbeatsNumOps
+dfs.datanode.IncrementalBlockReportsAvgTime
+dfs.datanode.IncrementalBlockReportsNumOps
+dfs.datanode.PacketAckRoundTripTimeNanosAvgTime
+dfs.datanode.PacketAckRoundTripTimeNanosNumOps
+dfs.datanode.RamDiskBlocksDeletedBeforeLazyPersisted
+dfs.datanode.RamDiskBlocksEvicted
+dfs.datanode.RamDiskBlocksEvictedWithoutRead
+dfs.datanode.RamDiskBlocksEvictionWindowMsAvgTime
+dfs.datanode.RamDiskBlocksEvictionWindowMsNumOps
+dfs.datanode.RamDiskBlocksLazyPersistWindowMsAvgTime
+dfs.datanode.RamDiskBlocksLazyPersistWindowMsNumOps
+dfs.datanode.RamDiskBlocksLazyPersisted
+dfs.datanode.RamDiskBlocksReadHits
+dfs.datanode.RamDiskBlocksWrite
+dfs.datanode.RamDiskBlocksWriteFallback
+dfs.datanode.RamDiskBytesLazyPersisted
+dfs.datanode.RamDiskBytesWrite
+dfs.datanode.ReadBlockOpAvgTime
+dfs.datanode.ReadBlockOpNumOps
+dfs.datanode.ReadsFromLocalClient
+dfs.datanode.ReadsFromRemoteClient
+dfs.datanode.RemoteBytesRead
+dfs.datanode.RemoteBytesWritten
+dfs.datanode.ReplaceBlockOpAvgTime
+dfs.datanode.ReplaceBlockOpNumOps
+dfs.datanode.SendDataPacketBlockedOnNetworkNanosAvgTime
+dfs.datanode.SendDataPacketBlockedOnNetworkNanosNumOps
+dfs.datanode.SendDataPacketTransferNanosAvgTime
+dfs.datanode.SendDataPacketTransferNanosNumOps
+dfs.datanode.TotalReadTime
+dfs.datanode.TotalWriteTime
+dfs.datanode.VolumeFailures
+dfs.datanode.WriteBlockOpAvgTime
+dfs.datanode.WriteBlockOpNumOps
+dfs.datanode.WritesFromLocalClient
+dfs.datanode.WritesFromRemoteClient
+dfs.namenode.AddBlockOps
+dfs.namenode.AllowSnapshotOps
+dfs.namenode.BlockReceivedAndDeletedOps
+dfs.namenode.BlockReportAvgTime
+dfs.namenode.BlockReportNumOps
+dfs.namenode.CacheReportAvgTime
+dfs.namenode.CacheReportNumOps
+dfs.namenode.CreateFileOps
+dfs.namenode.CreateSnapshotOps
+dfs.namenode.CreateSymlinkOps
+dfs.namenode.DeleteFileOps
+dfs.namenode.DeleteSnapshotOps
+dfs.namenode.DisallowSnapshotOps
+dfs.namenode.FileInfoOps
+dfs.namenode.FilesAppended
+dfs.namenode.FilesCreated
+dfs.namenode.FilesDeleted
+dfs.namenode.FilesInGetListingOps
+dfs.namenode.FilesRenamed
+dfs.namenode.FilesTruncated
+dfs.namenode.FsImageLoadTime
+dfs.namenode.GetAdditionalDatanodeOps
+dfs.namenode.GetBlockLocations
+dfs.namenode.GetEditAvgTime
+dfs.namenode.GetEditNumOps
+dfs.namenode.GetImageAvgTime
+dfs.namenode.GetImageNumOps
+dfs.namenode.GetLinkTargetOps
+dfs.namenode.GetListingOps
+dfs.namenode.ListSnapshottableDirOps
+dfs.namenode.PutImageAvgTime
+dfs.namenode.PutImageNumOps
+dfs.namenode.RenameSnapshotOps
+dfs.namenode.SafeModeTime
+dfs.namenode.SnapshotDiffReportOps
+dfs.namenode.StorageBlockReportOps
+dfs.namenode.SyncsAvgTime
+dfs.namenode.SyncsNumOps
+dfs.namenode.TotalFileOps
+dfs.namenode.TransactionsAvgTime
+dfs.namenode.TransactionsBatchedInSync
+dfs.namenode.TransactionsNumOps
+jvm.JvmMetrics.GcCount
+jvm.JvmMetrics.GcCountConcurrentMarkSweep
+jvm.JvmMetrics.GcCountParNew
+jvm.JvmMetrics.GcNumInfoThresholdExceeded
+jvm.JvmMetrics.GcNumWarnThresholdExceeded
+jvm.JvmMetrics.GcTimeMillis
+jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep
+jvm.JvmMetrics.GcTimeMillisParNew
+jvm.JvmMetrics.GcTotalExtraSleepTime
+jvm.JvmMetrics.LogError
+jvm.JvmMetrics.LogFatal
+jvm.JvmMetrics.LogInfo
+jvm.JvmMetrics.LogWarn
+jvm.JvmMetrics.MemHeapCommittedM
+jvm.JvmMetrics.MemHeapMaxM
+jvm.JvmMetrics.MemHeapUsedM
+jvm.JvmMetrics.MemMaxM
+jvm.JvmMetrics.MemNonHeapCommittedM
+jvm.JvmMetrics.MemNonHeapMaxM
+jvm.JvmMetrics.MemNonHeapUsedM
+jvm.JvmMetrics.ThreadsBlocked
+jvm.JvmMetrics.ThreadsNew
+jvm.JvmMetrics.ThreadsRunnable
+jvm.JvmMetrics.ThreadsTerminated
+jvm.JvmMetrics.ThreadsTimedWaiting
+jvm.JvmMetrics.ThreadsWaiting
+metricssystem.MetricsSystem.DroppedPubAll
+metricssystem.MetricsSystem.NumActiveSinks
+metricssystem.MetricsSystem.NumActiveSources
+metricssystem.MetricsSystem.NumAllSinks
+metricssystem.MetricsSystem.NumAllSources
+metricssystem.MetricsSystem.PublishAvgTime
+metricssystem.MetricsSystem.PublishNumOps
+metricssystem.MetricsSystem.Sink_timelineAvgTime
+metricssystem.MetricsSystem.Sink_timelineDropped
+metricssystem.MetricsSystem.Sink_timelineNumOps
+metricssystem.MetricsSystem.Sink_timelineQsize
+metricssystem.MetricsSystem.SnapshotAvgTime
+metricssystem.MetricsSystem.SnapshotNumOps
+rpc.RetryCache.NameNodeRetryCache.CacheCleared
+rpc.RetryCache.NameNodeRetryCache.CacheHit
+rpc.RetryCache.NameNodeRetryCache.CacheUpdated
+rpc.rpc.CallQueueLength
+rpc.rpc.NumOpenConnections
+rpc.rpc.ReceivedBytes
+rpc.rpc.RpcAuthenticationFailures
+rpc.rpc.RpcAuthenticationSuccesses
+rpc.rpc.RpcAuthorizationFailures
+rpc.rpc.RpcAuthorizationSuccesses
+rpc.rpc.RpcClientBackoff
+rpc.rpc.RpcProcessingTimeAvgTime
+rpc.rpc.RpcProcessingTimeNumOps
+rpc.rpc.RpcQueueTimeAvgTime
+rpc.rpc.RpcQueueTimeNumOps
+rpc.rpc.RpcSlowCalls
+rpc.rpc.SentBytes
+rpcdetailed.rpcdetailed.AddBlockAvgTime
+rpcdetailed.rpcdetailed.AddBlockNumOps
+rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime
+rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps
+rpcdetailed.rpcdetailed.BlockReportAvgTime
+rpcdetailed.rpcdetailed.BlockReportNumOps
+rpcdetailed.rpcdetailed.CompleteAvgTime
+rpcdetailed.rpcdetailed.CompleteNumOps
+rpcdetailed.rpcdetailed.CreateAvgTime
+rpcdetailed.rpcdetailed.CreateNumOps
+rpcdetailed.rpcdetailed.DeleteAvgTime
+rpcdetailed.rpcdetailed.DeleteNumOps
+rpcdetailed.rpcdetailed.FsyncAvgTime
+rpcdetailed.rpcdetailed.FsyncNumOps
+rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime
+rpcdetailed.rpcdetailed.GetBlockLocationsNumOps
+rpcdetailed.rpcdetailed.GetFileInfoAvgTime
+rpcdetailed.rpcdetailed.GetFileInfoNumOps
+rpcdetailed.rpcdetailed.GetListingAvgTime
+rpcdetailed.rpcdetailed.GetListingNumOps
+rpcdetailed.rpcdetailed.GetServerDefaultsAvgTime
+rpcdetailed.rpcdetailed.GetServerDefaultsNumOps
+rpcdetailed.rpcdetailed.GetTransactionIdAvgTime
+rpcdetailed.rpcdetailed.GetTransactionIdNumOps
+rpcdetailed.rpcdetailed.MkdirsAvgTime
+rpcdetailed.rpcdetailed.MkdirsNumOps
+rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime
+rpcdetailed.rpcdetailed.RegisterDatanodeNumOps
+rpcdetailed.rpcdetailed.Rename2AvgTime
+rpcdetailed.rpcdetailed.Rename2NumOps
+rpcdetailed.rpcdetailed.RenameAvgTime
+rpcdetailed.rpcdetailed.RenameNumOps
+rpcdetailed.rpcdetailed.RenewLeaseAvgTime
+rpcdetailed.rpcdetailed.RenewLeaseNumOps
+rpcdetailed.rpcdetailed.SendHeartbeatAvgTime
+rpcdetailed.rpcdetailed.SendHeartbeatNumOps
+rpcdetailed.rpcdetailed.SetPermissionAvgTime
+rpcdetailed.rpcdetailed.SetPermissionNumOps
+rpcdetailed.rpcdetailed.SetReplicationAvgTime
+rpcdetailed.rpcdetailed.SetReplicationNumOps
+rpcdetailed.rpcdetailed.SetSafeModeAvgTime
+rpcdetailed.rpcdetailed.SetSafeModeNumOps
+rpcdetailed.rpcdetailed.SetTimesAvgTime
+rpcdetailed.rpcdetailed.SetTimesNumOps
+rpcdetailed.rpcdetailed.VersionRequestAvgTime
+rpcdetailed.rpcdetailed.VersionRequestNumOps
+ugi.UgiMetrics.GetGroupsAvgTime
+ugi.UgiMetrics.GetGroupsNumOps
+ugi.UgiMetrics.LoginFailureAvgTime
+ugi.UgiMetrics.LoginFailureNumOps
+ugi.UgiMetrics.LoginSuccessAvgTime
+ugi.UgiMetrics.LoginSuccessNumOps

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/HOST.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/HOST.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/HOST.txt
new file mode 100755
index 0000000..4b759c6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/HOST.txt
@@ -0,0 +1,37 @@
+swap_free
+write_bps
+write_bytes
+write_count
+write_time
+bytes_in
+bytes_out
+cpu_idle
+cpu_intr
+cpu_nice
+cpu_num
+cpu_sintr
+cpu_steal
+cpu_system
+cpu_user
+load_fifteen
+load_five
+load_one
+cpu_wio
+disk_free
+disk_percent
+disk_total
+mem_buffered
+mem_cached
+mem_free
+mem_shared
+mem_total
+mem_used
+disk_used
+pkts_in
+pkts_out
+proc_run
+proc_total
+read_bps
+read_bytes
+read_count
+read_time

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/KAFKA.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/KAFKA.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/KAFKA.txt
new file mode 100755
index 0000000..1e2017c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/KAFKA.txt
@@ -0,0 +1,190 @@
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile
+kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile
+kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile
+kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.count
+kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.count
+kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.count
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.count
+kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.count
+kafka.network.RequestMetrics.RequestsPerSec.request.Produce.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Produce.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Produce.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.StopReplica.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.count
+kafka.network.RequestMetrics.RequestsPerSec.request.UpdateMetadata.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Produce.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.count
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetFetch.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.count
+kafka.network.RequestMetrics.RequestsPerSec.request.Metadata.count
+kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.LeaderAndIsr.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.count
+kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.JoinGroup.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.count
+kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Heartbeat.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchFollower.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.count
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.FetchConsumer.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.count
+kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Offsets.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.OffsetCommit.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Fetch.meanRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ControlledShutdown.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.count
+kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.15MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.5MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.1MinuteRate
+kafka.network.RequestMetrics.RequestsPerSec.request.Produce.count
+kafka.network.RequestMetrics.RequestsPerSec.request.ConsumerMetadata.meanRate
+kafka.cluster.Partition.UnderReplicated.partition.0.topic.ambari_kafka_service_check
+kafka.server.ReplicaManager.UnderReplicatedPartitions
+kafka.server.ReplicaManager.PartitionCount
+kafka.server.ReplicaManager.LeaderCount
+kafka.server.ReplicaManager.IsrShrinksPerSec.count
+kafka.server.ReplicaManager.IsrShrinksPerSec.5MinuteRate
+kafka.server.ReplicaManager.IsrShrinksPerSec.1MinuteRate
+kafka.server.ReplicaManager.IsrShrinksPerSec.15MinuteRate
+kafka.server.ReplicaManager.IsrExpandsPerSec.meanRate
+kafka.server.ReplicaManager.IsrExpandsPerSec.count
+kafka.server.ReplicaManager.IsrExpandsPerSec.5MinuteRate
+kafka.server.ReplicaManager.IsrExpandsPerSec.15MinuteRate
+kafka.server.ReplicaFetcherManager.MinFetchRate.clientId.Replica
+kafka.server.ReplicaFetcherManager.MaxLag.clientId.Replica
+kafka.server.OffsetManager.NumOffsets
+kafka.server.OffsetManager.NumGroups
+kafka.server.KafkaServer.BrokerState
+kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.meanRate
+kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.5MinuteRate
+kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.1MinuteRate
+kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.15MinuteRate
+kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.meanRate
+kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.count
+kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.TotalProduceRequestsPerSec.15MinuteRate
+kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.meanRate
+kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.count
+kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.TotalFetchRequestsPerSec.15MinuteRate
+kafka.server.BrokerTopicMetrics.MessagesInPerSec.count
+kafka.server.BrokerTopicMetrics.MessagesInPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.MessagesInPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.meanRate
+kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.count
+kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.15MinuteRate
+kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.meanRate
+kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.count
+kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.FailedFetchRequestsPerSec.15MinuteRate
+kafka.server.BrokerTopicMetrics.BytesOutPerSec.meanRate
+kafka.server.BrokerTopicMetrics.BytesOutPerSec.count
+kafka.server.BrokerTopicMetrics.BytesOutPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.BytesOutPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.BytesInPerSec.meanRate
+kafka.server.BrokerTopicMetrics.BytesInPerSec.count
+kafka.server.BrokerTopicMetrics.BytesInPerSec.5MinuteRate
+kafka.server.BrokerTopicMetrics.BytesInPerSec.15MinuteRate
+kafka.network.SocketServer.ResponsesBeingSent
+kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.meanRate
+kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.5MinuteRate
+kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.1MinuteRate
+kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.15MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.2.meanRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.2.count
+kafka.network.SocketServer.IdlePercent.networkProcessor.2.5MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.2.1MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.2.15MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.1.meanRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.1.count
+kafka.network.SocketServer.IdlePercent.networkProcessor.1.5MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.1.1MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.1.15MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.0.meanRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.0.count
+kafka.network.SocketServer.IdlePercent.networkProcessor.0.1MinuteRate
+kafka.network.SocketServer.IdlePercent.networkProcessor.0.15MinuteRate
+kafka.network.RequestChannel.ResponseQueueSize.processor.0
+kafka.network.RequestChannel.ResponseQueueSize.processor.1
+kafka.network.SocketServer.IdlePercent.networkProcessor.0.5MinuteRate
+kafka.network.SocketServer.NetworkProcessorAvgIdlePercent.count
+kafka.server.BrokerTopicMetrics.BytesInPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.BytesOutPerSec.15MinuteRate
+kafka.server.BrokerTopicMetrics.FailedProduceRequestsPerSec.1MinuteRate
+kafka.server.BrokerTopicMetrics.MessagesInPerSec.15MinuteRate
+kafka.server.BrokerTopicMetrics.MessagesInPerSec.meanRate
+kafka.server.KafkaRequestHandlerPool.RequestHandlerAvgIdlePercent.count
+kafka.server.ReplicaManager.IsrExpandsPerSec.1MinuteRate
+kafka.server.ReplicaManager.IsrShrinksPerSec.meanRate
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.15MinuteRate
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.1MinuteRate
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.5MinuteRate
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.75percentile
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.95percentile
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.999percentile
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.99percentile
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.count
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.max
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.mean
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.meanRate
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.median
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.stddev
+kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.15MinuteRate
+kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.1MinuteRate
+kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.count
+kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.meanRate
+kafka.controller.KafkaController.ActiveControllerCount
+kafka.controller.KafkaController.OfflinePartitionsCount
+kafka.controller.KafkaController.PreferredReplicaImbalanceCount
+kafka.log.Log.LogEndOffset.partition
+kafka.log.Log.LogStartOffset.partition
+kafka.log.Log.NumLogSegments.partition
+kafka.log.Log.Size.partition
+kafka.network.RequestChannel.RequestQueueSize
+kafka.network.RequestChannel.ResponseQueueSize
+kafka.network.RequestChannel.ResponseQueueSize.processor.2
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.min
+kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.5MinuteRate
+kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.98percentile

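The file above ships under the AMBARI_METRICS package as a plain whitelist: one Kafka metric name per line. Purely as a minimal sketch (the helper below is not part of this patch; the file path and the prefix filter are assumptions), loading and filtering such a whitelist could look like:

    def load_metric_whitelist(path="KAFKA.txt"):
        # Return the non-empty metric names listed one per line in the file.
        with open(path) as f:
            return [line.strip() for line in f if line.strip()]

    def filter_by_prefix(names, prefixes=("kafka.server.BrokerTopicMetrics.",)):
        # Keep only metrics whose names start with one of the given prefixes.
        return [n for n in names if n.startswith(prefixes)]

    names = load_metric_whitelist()
    broker_topic_metrics = filter_by_prefix(names)
    print("%d of %d whitelisted metrics are BrokerTopicMetrics"
          % (len(broker_topic_metrics), len(names)))
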
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/STORM.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/STORM.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/STORM.txt
new file mode 100755
index 0000000..04bca00
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/STORM.txt
@@ -0,0 +1,7 @@
+Supervisors
+Total Tasks
+Total Slots
+Used Slots
+Topologies
+Total Executors
+Free Slots

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/YARN.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/YARN.txt b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/YARN.txt
new file mode 100755
index 0000000..ce04228
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/files/service-metrics/YARN.txt
@@ -0,0 +1,178 @@
+jvm.JvmMetrics.GcCount
+jvm.JvmMetrics.GcCountPS
+jvm.JvmMetrics.GcTimeMillis
+jvm.JvmMetrics.GcTimeMillisPS
+jvm.JvmMetrics.LogError
+jvm.JvmMetrics.LogFatal
+jvm.JvmMetrics.LogInfo
+jvm.JvmMetrics.LogWarn
+jvm.JvmMetrics.MemHeapCommittedM
+jvm.JvmMetrics.MemHeapMaxM
+jvm.JvmMetrics.MemHeapUsedM
+jvm.JvmMetrics.MemMaxM
+jvm.JvmMetrics.MemNonHeapCommittedM
+jvm.JvmMetrics.MemNonHeapMaxM
+jvm.JvmMetrics.MemNonHeapUsedM
+jvm.JvmMetrics.ThreadsBlocked
+jvm.JvmMetrics.ThreadsNew
+jvm.JvmMetrics.ThreadsRunnable
+jvm.JvmMetrics.ThreadsTerminated
+jvm.JvmMetrics.ThreadsTimedWaiting
+jvm.JvmMetrics.ThreadsWaiting
+mapred.ShuffleMetrics.ShuffleConnections
+mapred.ShuffleMetrics.ShuffleOutputBytes
+mapred.ShuffleMetrics.ShuffleOutputsFailed
+mapred.ShuffleMetrics.ShuffleOutputsOK
+metricssystem.MetricsSystem.DroppedPubAll
+metricssystem.MetricsSystem.NumActiveSinks
+metricssystem.MetricsSystem.NumActiveSources
+metricssystem.MetricsSystem.NumAllSinks
+metricssystem.MetricsSystem.NumAllSources
+metricssystem.MetricsSystem.PublishAvgTime
+metricssystem.MetricsSystem.PublishNumOps
+metricssystem.MetricsSystem.Sink_timelineAvgTime
+metricssystem.MetricsSystem.Sink_timelineDropped
+metricssystem.MetricsSystem.Sink_timelineNumOps
+metricssystem.MetricsSystem.Sink_timelineQsize
+metricssystem.MetricsSystem.SnapshotAvgTime
+metricssystem.MetricsSystem.SnapshotNumOps
+rpc.rpc.CallQueueLength
+rpc.rpc.NumOpenConnections
+rpc.rpc.ReceivedBytes
+rpc.rpc.RpcAuthenticationFailures
+rpc.rpc.RpcAuthenticationSuccesses
+rpc.rpc.RpcAuthorizationFailures
+rpc.rpc.RpcAuthorizationSuccesses
+rpc.rpc.RpcClientBackoff
+rpc.rpc.RpcProcessingTimeAvgTime
+rpc.rpc.RpcProcessingTimeNumOps
+rpc.rpc.RpcQueueTimeAvgTime
+rpc.rpc.RpcQueueTimeNumOps
+rpc.rpc.RpcSlowCalls
+rpc.rpc.SentBytes
+rpcdetailed.rpcdetailed.AllocateAvgTime
+rpcdetailed.rpcdetailed.AllocateNumOps
+rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime
+rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps
+rpcdetailed.rpcdetailed.GetApplicationReportAvgTime
+rpcdetailed.rpcdetailed.GetApplicationReportNumOps
+rpcdetailed.rpcdetailed.GetClusterMetricsAvgTime
+rpcdetailed.rpcdetailed.GetClusterMetricsNumOps
+rpcdetailed.rpcdetailed.GetClusterNodesAvgTime
+rpcdetailed.rpcdetailed.GetClusterNodesNumOps
+rpcdetailed.rpcdetailed.GetContainerStatusesAvgTime
+rpcdetailed.rpcdetailed.GetContainerStatusesNumOps
+rpcdetailed.rpcdetailed.GetNewApplicationAvgTime
+rpcdetailed.rpcdetailed.GetNewApplicationNumOps
+rpcdetailed.rpcdetailed.GetQueueInfoAvgTime
+rpcdetailed.rpcdetailed.GetQueueInfoNumOps
+rpcdetailed.rpcdetailed.GetQueueUserAclsAvgTime
+rpcdetailed.rpcdetailed.GetQueueUserAclsNumOps
+rpcdetailed.rpcdetailed.HeartbeatAvgTime
+rpcdetailed.rpcdetailed.HeartbeatNumOps
+rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime
+rpcdetailed.rpcdetailed.NodeHeartbeatNumOps
+rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime
+rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps
+rpcdetailed.rpcdetailed.RegisterNodeManagerAvgTime
+rpcdetailed.rpcdetailed.RegisterNodeManagerNumOps
+rpcdetailed.rpcdetailed.StartContainersAvgTime
+rpcdetailed.rpcdetailed.StartContainersNumOps
+rpcdetailed.rpcdetailed.StopContainersAvgTime
+rpcdetailed.rpcdetailed.StopContainersNumOps
+rpcdetailed.rpcdetailed.SubmitApplicationAvgTime
+rpcdetailed.rpcdetailed.SubmitApplicationNumOps
+ugi.UgiMetrics.GetGroupsAvgTime
+ugi.UgiMetrics.GetGroupsNumOps
+ugi.UgiMetrics.LoginFailureAvgTime
+ugi.UgiMetrics.LoginFailureNumOps
+ugi.UgiMetrics.LoginSuccessAvgTime
+ugi.UgiMetrics.LoginSuccessNumOps
+yarn.ClusterMetrics.AMLaunchDelayAvgTime
+yarn.ClusterMetrics.AMLaunchDelayNumOps
+yarn.ClusterMetrics.AMRegisterDelayAvgTime
+yarn.ClusterMetrics.AMRegisterDelayNumOps
+yarn.ClusterMetrics.NumActiveNMs
+yarn.ClusterMetrics.NumDecommissionedNMs
+yarn.ClusterMetrics.NumLostNMs
+yarn.ClusterMetrics.NumRebootedNMs
+yarn.ClusterMetrics.NumUnhealthyNMs
+yarn.NodeManagerMetrics.AllocatedContainers
+yarn.NodeManagerMetrics.AllocatedGB
+yarn.NodeManagerMetrics.AllocatedVCores
+yarn.NodeManagerMetrics.AvailableGB
+yarn.NodeManagerMetrics.AvailableVCores
+yarn.NodeManagerMetrics.BadLocalDirs
+yarn.NodeManagerMetrics.BadLogDirs
+yarn.NodeManagerMetrics.ContainerLaunchDurationAvgTime
+yarn.NodeManagerMetrics.ContainerLaunchDurationNumOps
+yarn.NodeManagerMetrics.ContainersCompleted
+yarn.NodeManagerMetrics.ContainersFailed
+yarn.NodeManagerMetrics.ContainersIniting
+yarn.NodeManagerMetrics.ContainersKilled
+yarn.NodeManagerMetrics.ContainersLaunched
+yarn.NodeManagerMetrics.ContainersRunning
+yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc
+yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc
+yarn.QueueMetrics.Queue=root.AMResourceLimitMB
+yarn.QueueMetrics.Queue=root.AMResourceLimitVCores
+yarn.QueueMetrics.Queue=root.ActiveApplications
+yarn.QueueMetrics.Queue=root.ActiveUsers
+yarn.QueueMetrics.Queue=root.AggregateContainersAllocated
+yarn.QueueMetrics.Queue=root.AggregateContainersReleased
+yarn.QueueMetrics.Queue=root.AllocatedContainers
+yarn.QueueMetrics.Queue=root.AllocatedMB
+yarn.QueueMetrics.Queue=root.AllocatedVCores
+yarn.QueueMetrics.Queue=root.AppAttemptFirstContainerAllocationDelayAvgTime
+yarn.QueueMetrics.Queue=root.AppAttemptFirstContainerAllocationDelayNumOps
+yarn.QueueMetrics.Queue=root.AppsCompleted
+yarn.QueueMetrics.Queue=root.AppsFailed
+yarn.QueueMetrics.Queue=root.AppsKilled
+yarn.QueueMetrics.Queue=root.AppsPending
+yarn.QueueMetrics.Queue=root.AppsRunning
+yarn.QueueMetrics.Queue=root.AppsSubmitted
+yarn.QueueMetrics.Queue=root.AvailableMB
+yarn.QueueMetrics.Queue=root.AvailableVCores
+yarn.QueueMetrics.Queue=root.PendingContainers
+yarn.QueueMetrics.Queue=root.PendingMB
+yarn.QueueMetrics.Queue=root.PendingVCores
+yarn.QueueMetrics.Queue=root.ReservedContainers
+yarn.QueueMetrics.Queue=root.ReservedMB
+yarn.QueueMetrics.Queue=root.ReservedVCores
+yarn.QueueMetrics.Queue=root.UsedAMResourceMB
+yarn.QueueMetrics.Queue=root.UsedAMResourceVCores
+yarn.QueueMetrics.Queue=root.default.AMResourceLimitMB
+yarn.QueueMetrics.Queue=root.default.AMResourceLimitVCores
+yarn.QueueMetrics.Queue=root.default.ActiveApplications
+yarn.QueueMetrics.Queue=root.default.ActiveUsers
+yarn.QueueMetrics.Queue=root.default.AggregateContainersAllocated
+yarn.QueueMetrics.Queue=root.default.AggregateContainersReleased
+yarn.QueueMetrics.Queue=root.default.AllocatedContainers
+yarn.QueueMetrics.Queue=root.default.AllocatedMB
+yarn.QueueMetrics.Queue=root.default.AllocatedVCores
+yarn.QueueMetrics.Queue=root.default.AppAttemptFirstContainerAllocationDelayAvgTime
+yarn.QueueMetrics.Queue=root.default.AppAttemptFirstContainerAllocationDelayNumOps
+yarn.QueueMetrics.Queue=root.default.AppsCompleted
+yarn.QueueMetrics.Queue=root.default.AppsFailed
+yarn.QueueMetrics.Queue=root.default.AppsKilled
+yarn.QueueMetrics.Queue=root.default.AppsPending
+yarn.QueueMetrics.Queue=root.default.AppsRunning
+yarn.QueueMetrics.Queue=root.default.AppsSubmitted
+yarn.QueueMetrics.Queue=root.default.AvailableMB
+yarn.QueueMetrics.Queue=root.default.AvailableVCores
+yarn.QueueMetrics.Queue=root.default.PendingContainers
+yarn.QueueMetrics.Queue=root.default.PendingMB
+yarn.QueueMetrics.Queue=root.default.PendingVCores
+yarn.QueueMetrics.Queue=root.default.ReservedContainers
+yarn.QueueMetrics.Queue=root.default.ReservedMB
+yarn.QueueMetrics.Queue=root.default.ReservedVCores
+yarn.QueueMetrics.Queue=root.default.UsedAMResourceMB
+yarn.QueueMetrics.Queue=root.default.UsedAMResourceVCores
+yarn.QueueMetrics.Queue=root.default.running_0
+yarn.QueueMetrics.Queue=root.default.running_1440
+yarn.QueueMetrics.Queue=root.default.running_300
+yarn.QueueMetrics.Queue=root.default.running_60
+yarn.QueueMetrics.Queue=root.running_0
+yarn.QueueMetrics.Queue=root.running_1440
+yarn.QueueMetrics.Queue=root.running_300
+yarn.QueueMetrics.Queue=root.running_60

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""


[25/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat_server.py
new file mode 100755
index 0000000..7496649
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat_server.py
@@ -0,0 +1,146 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.security_commons import build_expectations, \
+  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
+  FILE_TYPE_XML
+from webhcat import webhcat
+from webhcat_service import webhcat_service
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyImpl
+
+
+class WebHCatServer(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    webhcat_service(action='start', upgrade_type=upgrade_type)
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    webhcat_service(action='stop')
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    webhcat()
+
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class WebHCatServerDefault(WebHCatServer):
+  def get_component_name(self):
+    return "hive-webhcat"
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.webhcat_pid_file)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing WebHCat Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.1.0.0') >= 0:
+      # webhcat has no conf, but uses hadoop home, so verify that regular hadoop conf is set
+      conf_select.select(params.stack_name, "hadoop", params.version)
+      stack_select.select("hive-webhcat", params.version)
+
+  def security_status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    if status_params.security_enabled:
+      expectations ={}
+      expectations.update(
+        build_expectations(
+          'webhcat-site',
+          {
+            "templeton.kerberos.secret": "secret"
+          },
+          [
+            "templeton.kerberos.keytab",
+            "templeton.kerberos.principal"
+          ],
+          [
+            "templeton.kerberos.keytab"
+          ]
+        )
+      )
+      expectations.update(
+        build_expectations(
+          'hive-site',
+          {
+            "hive.server2.authentication": "KERBEROS",
+            "hive.metastore.sasl.enabled": "true",
+            "hive.security.authorization.enabled": "true"
+          },
+          None,
+          None
+        )
+      )
+
+      security_params = {}
+      security_params.update(get_params_from_filesystem(status_params.hive_conf_dir,
+                                                        {'hive-site.xml': FILE_TYPE_XML}))
+      security_params.update(get_params_from_filesystem(status_params.webhcat_conf_dir,
+                                                        {'webhcat-site.xml': FILE_TYPE_XML}))
+      result_issues = validate_security_config_properties(security_params, expectations)
+      if not result_issues: # If all validations passed successfully
+        try:
+          # Double check the dict before calling execute
+          if 'webhcat-site' not in security_params \
+            or 'templeton.kerberos.keytab' not in security_params['webhcat-site'] \
+            or 'templeton.kerberos.principal' not in security_params['webhcat-site']:
+            self.put_structured_out({"securityState": "UNSECURED"})
+            self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set properly."})
+            return
+
+          cached_kinit_executor(status_params.kinit_path_local,
+                                status_params.webhcat_user,
+                                security_params['webhcat-site']['templeton.kerberos.keytab'],
+                                security_params['webhcat-site']['templeton.kerberos.principal'],
+                                status_params.hostname,
+                                status_params.tmp_dir)
+          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
+        except Exception as e:
+          self.put_structured_out({"securityState": "ERROR"})
+          self.put_structured_out({"securityStateErrorInfo": str(e)})
+      else:
+        issues = []
+        for cf in result_issues:
+          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
+        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
+        self.put_structured_out({"securityState": "UNSECURED"})
+    else:
+      self.put_structured_out({"securityState": "UNSECURED"})
+
+
+if __name__ == "__main__":
+  WebHCatServer().execute()

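The security_status flow above builds per-file expectations, reads the *-site.xml files back from disk, and reports every mismatch before attempting the kinit check. Purely as an illustration of that comparison (the dicts and the validate() helper below are assumptions, not the resource_management APIs the script uses):

    def validate(actual, expected):
        # Return {config_file: reason} for every expectation that is not met.
        issues = {}
        for conf_file, rules in expected.items():
            site = actual.get(conf_file, {})
            for prop, value in rules.get("value_checks", {}).items():
                if site.get(prop) != value:
                    issues[conf_file] = "property %s should be %s" % (prop, value)
            for prop in rules.get("required_props", []):
                if prop not in site:
                    issues[conf_file] = "property %s is missing" % prop
        return issues

    expected = {"webhcat-site": {"value_checks": {"templeton.kerberos.secret": "secret"},
                                 "required_props": ["templeton.kerberos.keytab",
                                                    "templeton.kerberos.principal"]}}
    actual = {"webhcat-site": {"templeton.kerberos.secret": "secret"}}
    print(validate(actual, expected))  # reports the missing keytab/principal properties
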
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat_service.py
new file mode 100755
index 0000000..0d17bbe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat_service.py
@@ -0,0 +1,60 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.core.shell import as_user
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def webhcat_service(action='start', upgrade_type=None):
+  import params
+  if action == 'start' or action == 'stop':
+    Service(params.webhcat_server_win_service_name, action=action)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def webhcat_service(action='start', upgrade_type=None):
+  import params
+
+  environ = {
+    'HADOOP_HOME': params.hadoop_home
+  }
+
+  cmd = format('{webhcat_bin_dir}/webhcat_server.sh')
+
+  if action == 'start':
+    if upgrade_type is not None and params.version:
+      environ['HADOOP_HOME'] = format("/usr/iop/{version}/hadoop")
+
+    daemon_cmd = format('cd {hcat_pid_dir} ; {cmd} start')
+    no_op_test = as_user(format('ls {webhcat_pid_file} >/dev/null 2>&1 && ps -p `cat {webhcat_pid_file}` >/dev/null 2>&1'), user=params.webhcat_user)
+    Execute(daemon_cmd,
+            user=params.webhcat_user,
+            not_if=no_op_test,
+            environment = environ)
+  elif action == 'stop':
+    daemon_cmd = format('cd {hcat_pid_dir} ; {cmd} stop')
+    Execute(daemon_cmd,
+            user = params.webhcat_user,
+            environment = environ)
+    File(params.webhcat_pid_file,
+         action="delete",
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat_service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat_service_check.py
new file mode 100755
index 0000000..0dea584
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/scripts/webhcat_service_check.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import urllib2
+
+from resource_management import *
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from ambari_commons import OSConst
+import time
+
+@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
+def webhcat_service_check():
+  Logger.info("Webhcat smoke test - service status")
+
+  import params
+  url_tests = [
+    "status",
+    #These are the failing ones:
+    #"ddl/database?user.name=hadoop",
+    #"ddl/database/default/table?user.name=hadoop"
+  ]
+
+
+  import socket
+
+  url_host = socket.getfqdn()
+  url_port = params.config["configurations"]["webhcat-site"]["templeton.port"]
+
+  for url_test in url_tests:
+    url_request = "http://{0}:{1}/templeton/v1/{2}".format(url_host, url_port, url_test)
+    url_response = None
+
+    try:
+      # execute the query for the JSON that includes WebHCat status
+      url_response = urllib2.urlopen(url_request, timeout=30)
+
+      status = url_response.getcode()
+      response = url_response.read()
+
+      if status != 200:
+        Logger.warning("Webhcat service check status: {0}".format(status))
+      Logger.info("Webhcat service check response: {0}".format(response))
+    except urllib2.HTTPError as he:
+      raise Fail("Webhcat check {0} failed: {1}".format(url_request, he.msg))
+    finally:
+      if url_response is not None:
+        try:
+          url_response.close()
+        except:
+          pass
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def webhcat_service_check():
+  import params
+  File(format("{tmp_dir}/templetonSmoke.sh"),
+       content= StaticFile('templetonSmoke.sh'),
+       mode=0755
+  )
+
+  if params.security_enabled:
+    smokeuser_keytab=params.smoke_user_keytab
+    smoke_user_principal=params.smokeuser_principal
+  else:
+    smokeuser_keytab= "no_keytab"
+    smoke_user_principal="no_principal"
+
+  unique_name = format("{smokeuser}.{timestamp}", timestamp = time.time())
+  templeton_test_script = format("idtest.{unique_name}.pig")
+  templeton_test_input = format("/tmp/idtest.{unique_name}.in")
+  templeton_test_output = format("/tmp/idtest.{unique_name}.out")
+
+  File(format("{tmp_dir}/{templeton_test_script}"),
+       content = Template("templeton_smoke.pig.j2", templeton_test_input=templeton_test_input, templeton_test_output=templeton_test_output),
+       owner=params.hdfs_user
+  )
+
+  params.HdfsResource(format("/tmp/{templeton_test_script}"),
+                      action = "create_on_execute",
+                      type = "file",
+                      source = format("{tmp_dir}/{templeton_test_script}"),
+                      owner = params.smokeuser
+  )
+
+  params.HdfsResource(templeton_test_input,
+                      action = "create_on_execute",
+                      type = "file",
+                      source = "/etc/passwd",
+                      owner = params.smokeuser
+  )
+
+  params.HdfsResource(None, action = "execute")
+
+  cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {templeton_port} {templeton_test_script} {smokeuser_keytab}"
+               " {security_param} {kinit_path_local} {smoke_user_principal}")
+
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+          logoutput=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/templates/hive.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/templates/hive.conf.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/templates/hive.conf.j2
new file mode 100755
index 0000000..8b7f8bd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/templates/hive.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hive_user}}   - nofile 32000
+{{hive_user}}   - nproc  16000

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/templates/startHiveserver2.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/templates/startHiveserver2.sh.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/templates/startHiveserver2.sh.j2
new file mode 100755
index 0000000..70b418c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/templates/startHiveserver2.sh.j2
@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+HIVE_SERVER2_OPTS=" -hiveconf hive.log.file=hiveserver2.log -hiveconf hive.log.dir=$5"
+HIVE_CONF_DIR=$4 {{hive_bin}}/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_OPTS} > $1 2> $2 &
+echo $!|cat>$3

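The rendered script expects five positional arguments: the stdout log ($1), the stderr log ($2), a file to receive the daemon PID ($3), the Hive conf dir ($4), and the hive.log.dir value ($5). A minimal sketch of a caller, with placeholder paths rather than the ones Ambari actually passes:

    import subprocess

    subprocess.check_call([
        "/bin/bash", "/tmp/startHiveserver2.sh",
        "/var/log/hive/hiveserver2.out",   # $1: stdout log
        "/var/log/hive/hiveserver2.err",   # $2: stderr log
        "/var/run/hive/hive-server.pid",   # $3: pid file
        "/etc/hive/conf.server",           # $4: HIVE_CONF_DIR
        "/var/log/hive",                   # $5: hive.log.dir
    ])
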
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/templates/templeton_smoke.pig.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/templates/templeton_smoke.pig.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/templates/templeton_smoke.pig.j2
new file mode 100755
index 0000000..1e0f347
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HIVE/package/templates/templeton_smoke.pig.j2
@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+A = load '{{templeton_test_input}}' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into '{{templeton_test_output}}';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/alerts.json
new file mode 100755
index 0000000..4c9884c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/alerts.json
@@ -0,0 +1,32 @@
+{
+  "KAFKA": {
+    "service": [],
+    "KAFKA_BROKER": [
+      {
+        "name": "kafka_broker_process",
+        "label": "Kafka Broker Process",
+        "description": "This host-level alert is triggered if the Kafka Broker cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{kafka-broker/port}}",
+          "default_port": 6667,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}

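The PORT alert above is evaluated by the Ambari agent: it times a TCP connect to the broker port and classifies the result as OK, WARNING (roughly, a response slower than 1.5 s) or CRITICAL (connection failure or slower than 5.0 s). A rough standalone sketch of that classification, with an assumed host and the default port; the real evaluation lives in the agent, not in this snippet:

    import socket
    import time

    def check_port(host="localhost", port=6667, warn=1.5, crit=5.0):
        # Time a TCP connect and map it onto the alert states from alerts.json.
        start = time.time()
        try:
            sock = socket.create_connection((host, port), timeout=crit)
            sock.close()
        except (socket.error, socket.timeout) as e:
            return "CRITICAL", "Connection failed: %s to %s:%s" % (e, host, port)
        elapsed = time.time() - start
        state = "WARNING" if elapsed >= warn else "OK"
        return state, "TCP OK - %.3fs response on port %s" % (elapsed, port)

    print(check_port())
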
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-broker.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-broker.xml
new file mode 100755
index 0000000..095f634
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-broker.xml
@@ -0,0 +1,414 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>log.dirs</name>
+    <value>/kafka-logs</value>
+    <description>
+      A comma-separated list of one or more directories in which Kafka data is stored.
+      Each new partition that is created will be placed in the directory which currently has the fewest partitions.
+    </description>
+    <value-attributes>
+      <type>directories</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>port</name>
+    <value>6667</value>
+    <description>
+      The port on which the server accepts client connections.
+    </description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+  <property>
+    <name>zookeeper.connect</name>
+    <value>localhost:2181</value>
+    <description>
+      Zookeeper also allows you to add a "chroot" path which will make all kafka data for this cluster appear under a particular path.
+      This is a way to set up multiple Kafka clusters or other applications on the same zookeeper cluster. To do this, give a connection
+      string in the form hostname1:port1,hostname2:port2,hostname3:port3/chroot/path which would put all this cluster's data under the
+      path /chroot/path. Note that you must create this path yourself prior to starting the broker and consumers must use the
+      same connection string.
+    </description>
+  </property>
+  <property>
+    <name>message.max.bytes</name>
+    <value>1000000</value>
+    <description>
+      The maximum size of a message that the server can receive.
+      It is important that this property be in sync with the maximum fetch size your consumers use or
+      else an unruly producer will be able to publish messages too large for consumers to consume.
+    </description>
+  </property>
+  <property>
+    <name>num.network.threads</name>
+    <value>3</value>
+    <description>
+      The number of network threads that the server uses for handling network requests.
+      You probably don't need to change this.
+    </description>
+  </property>
+  <property>
+    <name>num.io.threads</name>
+    <value>8</value>
+    <description>
+      The number of I/O threads that the server uses for executing requests. You should have at least as many threads as you have disks.
+    </description>
+  </property>
+  <property>
+    <name>queued.max.requests</name>
+    <value>500</value>
+    <description>The number of requests that can be queued up for processing by the I/O threads before the network threads stop reading in new requests.</description>
+  </property>
+  <property>
+    <name>socket.send.buffer.bytes</name>
+    <value>102400</value>
+    <description>
+      The SO_SNDBUFF buffer the server prefers for socket connections.
+    </description>
+  </property>
+  <property>
+    <name>socket.receive.buffer.bytes</name>
+    <value>102400</value>
+    <description>
+      The SO_RCVBUFF buffer the server prefers for socket connections.
+    </description>
+  </property>
+  <property>
+    <name>socket.request.max.bytes</name>
+    <value>104857600</value>
+    <description>
+      The maximum request size the server will allow. This prevents the server from running out of memory and should be smaller than the Java heap size.
+    </description>
+  </property>
+  <property>
+    <name>num.partitions</name>
+    <value>1</value>
+    <description>
+        The default number of partitions per topic.
+    </description>
+  </property>
+  <property>
+    <name>log.segment.bytes</name>
+    <value>1073741824</value>
+    <description>
+      The maximum size of a single log segment file. When this size is reached,
+      a new log segment will be rolled.
+    </description>
+  </property>
+  <property>
+    <name>log.roll.hours</name>
+    <value>168</value>
+    <description>
+      This setting will force Kafka to roll a new log segment even if the log.segment.bytes size has not been reached.
+    </description>
+  </property>
+  <property>
+    <name>log.retention.bytes</name>
+    <value>-1</value>
+    <description>
+      The amount of data to retain in the log for each topic partition. Note that this is the limit per partition, so multiply by the number of partitions to get the total data retained for the topic. Also note that if both log.retention.hours and log.retention.bytes are set, we delete a segment when either limit is exceeded.
+    </description>
+  </property>
+  <property>
+    <name>log.retention.hours</name>
+    <value>168</value>
+    <description>
+      The number of hours to keep a log segment before it is deleted, i.e. the default data retention window for all topics. Note that if both log.retention.hours and log.retention.bytes are set, we delete a segment when either limit is exceeded.
+    </description>
+  </property>
+  <property>
+    <name>log.cleanup.interval.mins</name>
+    <value>10</value>
+    <description>The frequency in minutes that the log cleaner checks whether any log segment is eligible for deletion to meet the retention policies.
+    </description>
+  </property>
+  <property>
+    <name>log.index.size.max.bytes</name>
+    <value>10485760</value>
+    <description>
+      The maximum size in bytes we allow for the offset index for each log segment. Note that we will always pre-allocate a
+      sparse file with this much space and shrink it down when the log rolls. If the index fills up we will roll a new log segment
+      even if we haven't reached the log.segment.bytes limit.
+    </description>
+  </property>
+  <property>
+    <name>log.index.interval.bytes</name>
+    <value>4096</value>
+    <description>
+      The byte interval at which we add an entry to the offset index. When executing a fetch request the server must do a linear scan for up to this many bytes to find the correct position in the log to begin and end the fetch. So setting this value to be larger will mean larger index files (and a bit more memory usage) but less scanning. However the server will never add more than one index entry per log append (even if more than log.index.interval worth of messages are appended). In general you probably don't need to mess with this value.
+    </description>
+  </property>
+
+  <property>
+    <name>log.flush.scheduler.interval.ms</name>
+    <value>3000</value>
+    <description>
+      The frequency in ms that the log flusher checks whether any log is eligible to be flushed to disk.
+    </description>
+  </property>
+  <property>
+    <name>log.flush.interval.ms</name>
+    <value>3000</value>
+    <description>
+      The maximum time between fsync calls on the log. If used in conjunction with log.flush.interval.messages, the log will be flushed when either criterion is met.
+    </description>
+  </property>
+  <property>
+    <name>auto.create.topics.enable</name>
+    <value>true</value>
+    <description>
+      Enable auto creation of topic on the server. If this is set to true then attempts to produce, consume, or fetch metadata for a non-existent topic will automatically create it with the default replication factor and number of partitions.
+    </description>
+  </property>
+  <property>
+    <name>controller.socket.timeout.ms</name>
+    <value>30000</value>
+    <description>The socket timeout for commands from the partition management controller to the replicas.</description>
+  </property>
+  <property>
+    <name>controller.message.queue.size</name>
+    <value>10</value>
+    <description>The buffer size for controller-to-broker-channels</description>
+  </property>
+  <property>
+    <name>default.replication.factor</name>
+    <value>1</value>
+    <description>The default replication factor for automatically created topics.</description>
+  </property>
+  <property>
+    <name>replica.lag.time.max.ms</name>
+    <value>10000</value>
+    <description>If a follower hasn't sent any fetch requests for this window of time, the leader will remove the follower from ISR (in-sync replicas) and treat it as dead.</description>
+  </property>
+  <property>
+    <name>replica.lag.max.messages</name>
+    <value>4000</value>
+    <description>
+      If a replica falls more than this many messages behind the leader, the leader will remove the follower from ISR and treat it as dead.
+    </description>
+  </property>
+  <property>
+    <name>replica.socket.timeout.ms</name>
+    <value>30000</value>
+    <description>The socket timeout for network requests to the leader for replicating data.</description>
+  </property>
+  <property>
+    <name>replica.socket.receive.buffer.bytes</name>
+    <value>65536</value>
+    <description>The socket receive buffer for network requests to the leader for replicating data.</description>
+  </property>
+  <property>
+    <name>replica.fetch.max.bytes</name>
+    <value>1048576</value>
+    <description>The number of bytes of messages to attempt to fetch for each partition in the fetch requests the replicas send to the leader.</description>
+  </property>
+  <property>
+    <name>replica.fetch.wait.max.ms</name>
+    <value>500</value>
+    <description>The maximum amount of time to wait for data to arrive on the leader in the fetch requests sent by the replicas to the leader.</description>
+  </property>
+  <property>
+    <name>replica.fetch.min.bytes</name>
+    <value>1</value>
+    <description>Minimum bytes expected for each fetch response for the fetch requests from the replica to the leader. If not enough bytes, wait up to replica.fetch.wait.max.ms for this many bytes to arrive.
+    </description>
+  </property>
+  <property>
+    <name>num.replica.fetchers</name>
+    <value>1</value>
+    <description>
+      Number of threads used to replicate messages from leaders. Increasing this value can increase the degree of I/O parallelism in the follower broker.
+    </description>
+  </property>
+  <property>
+    <name>replica.high.watermark.checkpoint.interval.ms</name>
+    <value>5000</value>
+    <description>The frequency with which each replica saves its high watermark to disk to handle recovery.</description>
+  </property>
+  <property>
+    <name>fetch.purgatory.purge.interval.requests</name>
+    <value>10000</value>
+    <description>The purge interval (in number of requests) of the fetch request purgatory.</description>
+  </property>
+  <property>
+    <name>producer.purgatory.purge.interval.requests</name>
+    <value>10000</value>
+    <description>The purge interval (in number of requests) of the producer request purgatory.</description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout.ms</name>
+    <value>30000</value>
+    <description>Zookeeper session timeout. If the server fails to heartbeat to zookeeper within this period of time it is considered dead. If you set this too low the server may be falsely considered dead; if you set it too high it may take too long to recognize a truly dead server.</description>
+  </property>
+  <property>
+    <name>zookeeper.connection.timeout.ms</name>
+    <value>6000</value>
+    <description>The maximum amount of time that the client waits to establish a connection to zookeeper.</description>
+  </property>
+  <property>
+    <name>zookeeper.sync.time.ms</name>
+    <value>2000</value>
+    <description>How far a ZK follower can be behind a ZK leader.</description>
+  </property>
+  <property>
+    <name>controlled.shutdown.enable</name>
+    <value>false</value>
+    <description>Enable controlled shutdown of the broker. If enabled, the broker will move all leaders on it to some other brokers before shutting itself down. This reduces the unavailability window during shutdown.</description>
+  </property>
+  <property>
+    <name>controlled.shutdown.max.retries</name>
+    <value>3</value>
+    <description>Number of retries to complete the controlled shutdown successfully before executing an unclean shutdown.</description>
+  </property>
+  <property>
+    <name>controlled.shutdown.retry.backoff.ms</name>
+    <value>5000</value>
+    <description>
+      Backoff time between shutdown retries.
+    </description>
+  </property>
+  <property>
+    <name>kafka.metrics.reporters</name>
+    <value>{{kafka_metrics_reporters}}</value>
+    <description>
+      kafka timeline metrics reporter
+    </description>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.reporter.enabled</name>
+    <value>true</value>
+    <description>Kafka timeline metrics reporter enable</description>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.host</name>
+    <value>{{metric_collector_host}}</value>
+    <description>Timeline host</description>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.port</name>
+    <value>{{metric_collector_port}}</value>
+    <description>Timeline port</description>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.reporter.sendInterval</name>
+    <value>5900</value>
+    <description>Timeline metrics reporter send interval</description>
+  </property>
+  <property>
+    <name>kafka.timeline.metrics.maxRowCacheSize</name>
+    <value>10000</value>
+    <description>Timeline metrics reporter maximum row cache size</description>
+  </property>
+  <property>
+    <name>auto.leader.rebalance.enable</name>
+    <value>true</value>
+    <description>Enables auto leader balancing. A background thread checks and triggers leader balance if required at regular intervals</description>
+  </property>
+  <property>
+    <name>num.recovery.threads.per.data.dir</name>
+    <value>1</value>
+    <description>The number of threads per data directory to be used for log recovery at startup and flushing at shutdown</description>
+  </property>
+  <property>
+    <name>min.insync.replicas</name>
+    <value>1</value>
+    <description>Defines the minimum number of replicas in ISR needed to satisfy a produce request with required.acks=-1 (or all)</description>
+  </property>
+  <property>
+    <name>leader.imbalance.per.broker.percentage</name>
+    <value>10</value>
+    <description>The ratio of leader imbalance allowed per broker. The controller would trigger a leader balance if it goes above this value per broker. The value is specified in percentage.</description>
+  </property>
+  <property>
+    <name>leader.imbalance.check.interval.seconds</name>
+    <value>300</value>
+    <description>The frequency with which the partition rebalance check is triggered by the controller</description>
+  </property>
+  <property>
+    <name>offset.metadata.max.bytes</name>
+    <value>4096</value>
+    <description>The maximum size for a metadata entry associated with an offset commit</description>
+  </property>
+  <property>
+    <name>offsets.load.buffer.size</name>
+    <value>5242880</value>
+    <description>Batch size for reading from the offsets segments when loading offsets into the cache.</description>
+  </property>
+  <property>
+    <name>offsets.topic.replication.factor</name>
+    <value>3</value>
+    <description>The replication factor for the offsets topic (set higher to ensure availability).
+    To ensure that the effective replication factor of the offsets topic is the configured value,
+    the number of alive brokers has to be at least the replication factor at the time of the
+    first request for the offsets topic. If not, either the offsets topic creation will fail or it will get a replication factor of min(alive brokers, configured replication factor).</description>
+  </property>
+  <property>
+    <name>offsets.topic.num.partitions</name>
+    <value>50</value>
+    <description>The number of partitions for the offset commit topic (should not change after deployment)</description>
+  </property>
+  <property>
+    <name>offsets.topic.segment.bytes</name>
+    <value>104857600</value>
+    <description>The offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads</description>
+  </property>
+  <property>
+    <name>offsets.topic.compression.codec</name>
+    <value>0</value>
+    <description>Compression codec for the offsets topic - compression may be used to achieve "atomic" commits. Default is 0 (NoCompression); use 1 for GZIP, 2 for Snappy, and 3 for LZ4.
+    </description>
+  </property>
+  <property>
+    <name>offsets.retention.minutes</name>
+    <value>86400000</value>
+    <description>Log retention window in minutes for offsets topic</description>
+  </property>
+  <property>
+    <name>offsets.retention.check.interval.ms</name>
+    <value>600000</value>
+    <description>Frequency at which to check for stale offsets</description>
+  </property>
+  <property>
+    <name>offsets.commit.timeout.ms</name>
+    <value>5000</value>
+    <description>Offset commit will be delayed until all replicas for the offsets topic receive the commit or this timeout is reached. This is similar to the producer request timeout.</description>
+  </property>
+  <property>
+    <name>offsets.commit.required.acks</name>
+    <value>-1</value>
+    <description>The required acks before the commit can be accepted. In general, the default (-1) should not be overridden</description>
+  </property>
+  <property>
+    <name>delete.topic.enable</name>
+    <value>false</value>
+    <description>Enables delete topic. Delete topic through the admin tool will have no effect if this config is turned off</description>
+  </property>
+  <property>
+    <name>compression.type</name>
+    <description>Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.</description>
+    <value>producer</value>
+  </property>
+</configuration>
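
A rough, self-contained sketch of how a property set like the kafka-broker block above ends up on disk: flatten the key/value pairs into a server.properties file. This is a simplified stand-in rather than the stack's actual code path (the stack uses the PropertiesFile resource), and the sample values are illustrative only.

def render_server_properties(props, path):
    """Write key=value pairs, one per line, sorted for stable output."""
    with open(path, "w") as out:
        for key in sorted(props):
            out.write("%s=%s\n" % (key, props[key]))

sample = {
    "min.insync.replicas": "1",
    "offsets.topic.replication.factor": "3",
    "delete.topic.enable": "false",
    "compression.type": "producer",
}
render_server_properties(sample, "/tmp/server.properties")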

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-env.xml
new file mode 100755
index 0000000..23576e9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-env.xml
@@ -0,0 +1,69 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>kafka_user</name>
+    <value>kafka</value>
+    <property-type>USER</property-type>
+    <description></description>
+  </property>
+  <property>
+    <name>kafka_log_dir</name>
+    <value>/var/log/kafka</value>
+    <description></description>
+  </property>
+  <property>
+    <name>kafka_pid_dir</name>
+    <display-name>Kafka PID dir</display-name>
+    <value>/var/run/kafka</value>
+    <description></description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <!-- kafka-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for kafka-env.sh file</description>
+    <value>
+#!/bin/bash
+
+# Set KAFKA specific environment variables here.
+
+# The java implementation to use.
+export JAVA_HOME={{java64_home}}
+export PATH=$PATH:$JAVA_HOME/bin
+export PID_DIR={{kafka_pid_dir}}
+export LOG_DIR={{kafka_log_dir}}
+export KAFKA_KERBEROS_PARAMS={{kafka_kerberos_params}}
+# Add kafka sink to classpath and related dependencies
+if [ -e "/usr/lib/ambari-metrics-kafka-sink/ambari-metrics-kafka-sink.jar" ]; then
+  export CLASSPATH=$CLASSPATH:/usr/lib/ambari-metrics-kafka-sink/ambari-metrics-kafka-sink.jar
+  export CLASSPATH=$CLASSPATH:/usr/lib/ambari-metrics-kafka-sink/lib/*
+fi
+    </value>
+  </property>
+</configuration>
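
The "content" property above is a Jinja template. Outside of Ambari it can be rendered with the plain jinja2 package, as sketched below; the substituted values are made up, while in the stack they come from params.py through InlineTemplate.

from jinja2 import Template

env_template = (
    "#!/bin/bash\n"
    "export JAVA_HOME={{ java64_home }}\n"
    "export PID_DIR={{ kafka_pid_dir }}\n"
    "export LOG_DIR={{ kafka_log_dir }}\n"
)

print(Template(env_template).render(
    java64_home="/usr/jdk64/jdk1.8.0",  # illustrative path
    kafka_pid_dir="/var/run/kafka",
    kafka_log_dir="/var/log/kafka",
))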

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-log4j.xml
new file mode 100755
index 0000000..901859e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/configuration/kafka-log4j.xml
@@ -0,0 +1,116 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+kafka.logs.dir=logs
+
+log4j.rootLogger=INFO, stdout
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
+log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
+log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
+log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
+log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
+log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
+
+# Turn on all our debugging info
+#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender
+#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender
+#log4j.logger.kafka.perf=DEBUG, kafkaAppender
+#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender
+#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG
+log4j.logger.kafka=INFO, kafkaAppender
+log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
+log4j.additivity.kafka.network.RequestChannel$=false
+
+#log4j.logger.kafka.network.Processor=TRACE, requestAppender
+#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
+#log4j.additivity.kafka.server.KafkaApis=false
+log4j.logger.kafka.request.logger=WARN, requestAppender
+log4j.additivity.kafka.request.logger=false
+
+log4j.logger.kafka.controller=TRACE, controllerAppender
+log4j.additivity.kafka.controller=false
+
+log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
+log4j.additivity.kafka.log.LogCleaner=false
+
+log4j.logger.state.change.logger=TRACE, stateChangeAppender
+log4j.additivity.state.change.logger=false
+
+   </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/kerberos.json
new file mode 100755
index 0000000..2aae44a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/kerberos.json
@@ -0,0 +1,49 @@
+{
+  "services": [
+    {
+      "name": "KAFKA",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "kafka-broker": {
+              "authorizer.class.name": "kafka.security.auth.SimpleAclAuthorizer",
+              "principal.to.local.class":"kafka.security.auth.KerberosPrincipalToLocal",
+              "super.users": "User:${kafka-env/kafka_user}",
+              "security.inter.broker.protocol": "SASL_PLAINTEXT"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "KAFKA_BROKER",
+          "identities": [
+            {
+              "name": "kafka_broker",
+              "principal": {
+                "value": "${kafka-env/kafka_user}/_HOST@${realm}",
+                "type": "service",
+                "configuration": "kafka-env/kafka_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/kafka.service.keytab",
+                "owner": {
+                  "name": "${kafka-env/kafka_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "kafka-env/kafka_keytab"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
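
The identity above relies on ${config-type/property} references such as ${kafka-env/kafka_user} and ${realm}. The sketch below shows one way such placeholders could be resolved against a configuration dictionary; the resolver and the sample values are illustrative, not Ambari's actual variable-replacement code.

import re

def resolve(value, configurations, realm):
    """Replace ${config-type/property} and ${realm} references in value."""
    def _lookup(match):
        ref = match.group(1)
        if ref == "realm":
            return realm
        config_type, prop = ref.split("/", 1)
        return configurations[config_type][prop]
    return re.sub(r"\$\{([^}]+)\}", _lookup, value)

configs = {"kafka-env": {"kafka_user": "kafka"}}
print(resolve("${kafka-env/kafka_user}/_HOST@${realm}", configs, "EXAMPLE.COM"))
# -> kafka/_HOST@EXAMPLE.COM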

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/metainfo.xml
new file mode 100755
index 0000000..79bafc0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/metainfo.xml
@@ -0,0 +1,83 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KAFKA</name>
+      <displayName>Kafka</displayName>
+      <comment>A high-throughput distributed messaging system</comment>
+      <version>0.8.2</version>
+      <components>
+        <component>
+          <name>KAFKA_BROKER</name>
+          <displayName>Kafka Broker</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/kafka_broker.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,redhat7,suse11</osFamily>
+          <packages>
+            <package>
+              <name>kafka_4_1_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>kafka_4_1_.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+      <configuration-dependencies>
+        <config-type>kafka-broker</config-type>
+        <config-type>kafka-env</config-type>
+        <config-type>kafka-log4j</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterChange>true</restartRequiredAfterChange>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/metrics.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/metrics.json
new file mode 100755
index 0000000..2adcecc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/metrics.json
@@ -0,0 +1,264 @@
+{
+  "KAFKA_BROKER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/jvm/uptime": {
+              "metric": "jvm.uptime",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/heap_usage": {
+              "metric": "jvm.heap_usage",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/non_heap_usage": {
+              "metric": "jvm.non_heap_usage",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/thread-states/runnable": {
+              "metric": "jvm.thread-states.runnable",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/thread-states/blocked": {
+              "metric": "jvm.thread-states.blocked",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/thread-states/timed_waiting": {
+              "metric": "jvm.thread-states.timed_waiting",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/thread-states/terminated": {
+              "metric": "jvm.thread-states.terminated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/thread_count": {
+              "metric": "jvm.thread_count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/daemon_thread_count": {
+              "metric": "jvm.daemon_thread_count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsMessagesInPerSec/1MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.MessagesInPerSec.1MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsMessagesInPerSec/5MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.MessagesInPerSec.5MinuteRate",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsMessagesInPerSec/15MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.MessagesInPerSec.15MinuteRate",
+              "pointInTime": false,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsMessagesInPerSec/meanRate": {
+              "metric": "kafka.server.BrokerTopicMetrics/MessagesInPerSec/meanRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsMessagesInPerSec/count": {
+              "metric": "kafka.server.BrokerTopicMetrics/MessagesInPerSec.counte",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesInPerSec/1MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesInPerSec.1MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesInPerSec/5MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesInPerSec.5MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesInPerSec/15MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesInPerSec.15MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesInPerSec/meanRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesInPerSec.meanRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesInPerSec/count": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesInPerSec.count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesOutPerSec/1MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesOutPerSec.1MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesOutPerSec/5MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesOutPerSec.5MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesOutPerSec/15MinuteRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesOutPerSec.15MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesOutPerSec/meanRate": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesOutPerSec.meanRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/BrokerTopicMetrics/AllTopicsBytesOutPerSec/count": {
+              "metric": "kafka.server.BrokerTopicMetrics.BytesOutPerSec.count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/KafkaController/ActiveControllerCount": {
+              "metric": "kafka.controller.KafkaController.ActiveControllerCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/log/LogFlushStats/LogFlushRateAndTimeMs/meanRate": {
+              "metric": "kafka.log.LogFlushStats.LogFlushRateAndTimeMs.meanRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/log/LogFlushStats/LogFlushRateAndTimeMs/1MinuteRate": {
+              "metric": "kafka.log.LogFlushStats.LogFlushRateAndTimeMs.1MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/log/LogFlushStats/LogFlushRateAndTimeMs/5MinuteRate": {
+              "metric": "kafka.log.LogFlushStats.LogFlushRateAndTimeMs.1MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/log/LogFlushStats/LogFlushRateAndTimeMs/15MinuteRate": {
+              "metric": "kafka.log.LogFlushStats.LogFlushRateAndTimeMs.15MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/log/LogFlushStats/LogFlushRateAndTimeMs/count": {
+              "metric": "kafka.log.LogFlushStats.LogFlushRateAndTimeMs.count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/LeaderElectionRateAndTimeMs/meanRate": {
+              "metric": "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.meanRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/LeaderElectionRateAndTimeMs/1MinuteRate": {
+              "metric": "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.1MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/LeaderElectionRateAndTimeMs/5MinuteRate": {
+              "metric": "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.5MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/LeaderElectionRateAndTimeMs/15MinuteRate": {
+              "metric": "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.15MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/LeaderElectionRateAndTimeMs/count": {
+              "metric": "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.count",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/UncleanLeaderElectionsPerSec/1MinuteRate": {
+              "metric": "kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.1MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/UncleanLeaderElectionsPerSec/5MinuteRate": {
+              "metric": "kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.5MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/UncleanLeaderElectionsPerSec/15MinuteRate": {
+              "metric": "kafka.controller.ControllerStats.UncleanLeaderElectionsPerSec.15MinuteRate",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/controller/ControllerStats/OfflinePartitionsCount": {
+              "metric": "kafka.controller.ControllerStats.OfflinePartitionsCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/ReplicaManager/PartitionCount": {
+              "metric": "kafka.server.ReplicaManager.PartitionCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/ReplicaManager/LeaderCount": {
+              "metric": "kafka.server.ReplicaManager.LeaderCount",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/ReplicaManager/UnderReplicatedPartitions": {
+              "metric": "kafka.server.ReplicaManager.UnderReplicatedPartitions",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/ReplicaManager/ISRShrinksPerSec": {
+              "metric": "kafka.server.ReplicaManager.ISRShrinksPerSec",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/ReplicaManager/ISRExpandsPerSec": {
+              "metric": "kafka.server.ReplicaManager.ISRExpandsPerSec",
+              "pointInTime": true,
+              "temporal": true
+            },
+
+            "metrics/kafka/server/ReplicaFetcherManager/Replica-MaxLag": {
+              "metric": "kafka.server.ReplicaFetcherManager.MaxLag.clientId.Replica",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/ProducerRequestPurgatory/PurgatorySize": {
+              "metric": "kafka.server.ProducerRequestPurgatory.PurgatorySize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/server/FetchRequestPurgatory/PurgatorySize": {
+              "metric": "kafka.server.FetchRequestPurgatory.PurgatorySize",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/cluster/Partition/$1-UnderReplicated": {
+              "metric": "kafka.cluster.Partition.(\\w+)-UnderReplicated",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/consumer/ConsumerFetcherManager/$1-MaxLag": {
+              "metric": "kafka.consumer.ConsumerFetcherManager.(\\w+)-MaxLag",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/kafka/consumer/ConsumerFetcherManager/$1-MinFetch": {
+              "metric": "kafka.consumer.ConsumerFetcherManager.(\\w+)-MinFetch",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      }
+    ]
+  }
+}
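
The last few entries are parameterized: a metric such as "kafka.cluster.Partition.(\w+)-UnderReplicated" carries a capture group, and the "$1" in the corresponding key is filled with whatever that group matched. The snippet below is a simplified stand-in for that mapping, not the actual metrics-provider code.

import re

pattern = re.compile(r"kafka\.cluster\.Partition\.(\w+)-UnderReplicated")
reported = "kafka.cluster.Partition.my_topic-UnderReplicated"  # made-up metric name

match = pattern.match(reported)
if match:
    key_template = "metrics/kafka/cluster/Partition/$1-UnderReplicated"
    print(key_template.replace("$1", match.group(1)))
    # -> metrics/kafka/cluster/Partition/my_topic-UnderReplicated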

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka.py
new file mode 100755
index 0000000..4fd17d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka.py
@@ -0,0 +1,239 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import collections
+import os
+
+from resource_management import *
+from resource_management.libraries.resources.properties_file import PropertiesFile
+from resource_management.libraries.resources.template_config import TemplateConfig
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.core.resources.system import Directory, Execute, File, Link
+from resource_management.core.source import StaticFile, Template, InlineTemplate
+from resource_management.libraries.functions import format
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions import Direction
+import sys, os
+from copy import deepcopy
+from resource_management.core import sudo
+
+def kafka(upgrade_type=None):
+    import params
+
+    ensure_base_directories()
+
+    Directory([params.kafka_log_dir, params.kafka_pid_dir, params.conf_dir],
+              mode=0755,
+              cd_access='a',
+              owner=params.kafka_user,
+              group=params.user_group,
+              create_parents=True
+          )
+    brokerid = str(sorted(params.kafka_hosts).index(params.hostname))
+    kafka_server_config = mutable_config_dict(params.config['configurations']['kafka-broker'])
+
+    # The IOP 4.1 GM build ships Kafka 0.9 and does not keep broker.id in server.properties,
+    # whereas other 4.1 builds do. From 4.1 onward the broker.id needs to be carried
+    # forward to 4.2.5, so check server.properties to decide whether to carry it forward.
+    kafka_config = {}
+    content = sudo.read_file(params.conf_dir + "/server.properties")
+    for line in content.splitlines():
+      if line.startswith("#") or not line.strip():
+        continue
+      key, value = line.split("=")
+      kafka_config[key] = value.replace("\n", "")
+
+    Logger.info("Express Upgrade From 4.1 to 4.2.5")
+    if 'broker.id' in kafka_config:
+      Logger.info("IOP 4.1 with Kafka 0.8")
+      kafka_server_config['broker.id'] = brokerid
+      kafka_server_config['host.name'] = params.hostname
+    else:
+      Logger.info("IOP 4.1 with Kafka 0.9")
+      listeners = kafka_server_config['listeners'].replace("localhost", params.hostname)
+      kafka_server_config['listeners'] = listeners
+
+      if params.security_enabled and params.kafka_kerberos_enabled:
+        Logger.info("Kafka kerberos security is enabled.")
+        if "SASL" not in listeners:
+          listeners = listeners.replace("PLAINTEXT", "SASL_PLAINTEXT")
+
+          kafka_server_config['listeners'] = listeners
+          kafka_server_config['advertised.listeners'] = listeners
+          Logger.info(format("Kafka advertised listeners: {listeners}"))
+
+      if 'advertised.listeners' in kafka_server_config:
+        advertised_listeners = kafka_server_config['advertised.listeners'].replace("localhost", params.hostname)
+        kafka_server_config['advertised.listeners'] = advertised_listeners
+        Logger.info(format("Kafka advertised listeners: {advertised_listeners}"))
+
+    if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:
+        listeners = kafka_server_config['listeners'].replace("localhost", params.hostname)
+        kafka_server_config['listeners'] = listeners
+        if 'advertised.listeners' in kafka_server_config:
+            advertised_listeners = kafka_server_config['advertised.listeners'].replace("localhost", params.hostname)
+            kafka_server_config['advertised.listeners'] = advertised_listeners
+            Logger.info(format("Kafka advertised listeners: {advertised_listeners}"))
+
+    kafka_server_config['kafka.metrics.reporters'] = params.kafka_metrics_reporters
+    if params.has_metric_collector:
+        kafka_server_config['kafka.timeline.metrics.host'] = params.metric_collector_host
+        kafka_server_config['kafka.timeline.metrics.port'] = params.metric_collector_port
+
+    kafka_data_dir = kafka_server_config['log.dirs']
+    Directory(filter(None,kafka_data_dir.split(",")),
+              mode=0755,
+              cd_access='a',
+              owner=params.kafka_user,
+              group=params.user_group,
+              create_parents=True)
+
+    PropertiesFile("server.properties",
+                      dir=params.conf_dir,
+                      properties=kafka_server_config,
+                      owner=params.kafka_user,
+                      group=params.user_group,
+    )
+
+    File(format("{conf_dir}/kafka-env.sh"),
+          owner=params.kafka_user,
+          content=InlineTemplate(params.kafka_env_sh_template)
+     )
+
+    if (params.log4j_props != None):
+        File(format("{conf_dir}/log4j.properties"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.kafka_user,
+             content=params.log4j_props
+         )
+
+    if params.security_enabled and params.kafka_kerberos_enabled:
+        TemplateConfig(format("{conf_dir}/kafka_jaas.conf"),
+                         owner=params.kafka_user)
+
+
+    setup_symlink(params.kafka_managed_pid_dir, params.kafka_pid_dir)
+    setup_symlink(params.kafka_managed_log_dir, params.kafka_log_dir)
+
+
+def mutable_config_dict(kafka_broker_config):
+    kafka_server_config = {}
+    for key, value in kafka_broker_config.iteritems():
+        kafka_server_config[key] = value
+    return kafka_server_config
+
+# Used to workaround the hardcoded pid/log dir used on the kafka bash process launcher
+def setup_symlink(kafka_managed_dir, kafka_ambari_managed_dir):
+  import params
+  backup_folder_path = None
+  backup_folder_suffix = "_tmp"
+  if kafka_ambari_managed_dir != kafka_managed_dir:
+    if os.path.exists(kafka_managed_dir) and not os.path.islink(kafka_managed_dir):
+
+      # Backup existing data before delete if config is changed repeatedly to/from default location at any point in time, as there may be relevant contents (historic logs)
+      backup_folder_path = backup_dir_contents(kafka_managed_dir, backup_folder_suffix)
+
+      Directory(kafka_managed_dir,
+                action="delete",
+                create_parents=True)
+
+    elif os.path.islink(kafka_managed_dir) and os.path.realpath(kafka_managed_dir) != kafka_ambari_managed_dir:
+      Link(kafka_managed_dir,
+           action="delete")
+
+    if not os.path.islink(kafka_managed_dir):
+      Link(kafka_managed_dir,
+           to=kafka_ambari_managed_dir)
+
+  elif os.path.islink(kafka_managed_dir): # If config is changed and coincides with the kafka managed dir, remove the symlink and physically create the folder
+    Link(kafka_managed_dir,
+         action="delete")
+
+    Directory(kafka_managed_dir,
+              mode=0755,
+              cd_access='a',
+              owner=params.kafka_user,
+              group=params.user_group,
+              create_parents=True)
+
+  if backup_folder_path:
+    # Restore backed up files to current relevant dirs if needed - will be triggered only when changing to/from default path;
+    for file in os.listdir(backup_folder_path):
+      File(os.path.join(kafka_managed_dir,file),
+           owner=params.kafka_user,
+           content = StaticFile(os.path.join(backup_folder_path,file)))
+
+    # Clean up backed up folder
+    Directory(backup_folder_path,
+              action="delete",
+              create_parents=True)
+
+
+# Uses agent temp dir to store backup files
+def backup_dir_contents(dir_path, backup_folder_suffix):
+  import params
+  backup_destination_path = params.tmp_dir + os.path.normpath(dir_path)+backup_folder_suffix
+  Directory(backup_destination_path,
+            mode=0755,
+            cd_access='a',
+            owner=params.kafka_user,
+            group=params.user_group,
+            create_parents=True
+  )
+  # Safely copy top-level contents to backup folder
+  for file in os.listdir(dir_path):
+    File(os.path.join(backup_destination_path, file),
+         owner=params.kafka_user,
+         content = StaticFile(os.path.join(dir_path,file)))
+
+  return backup_destination_path
+
+
+def ensure_base_directories():
+  """
+  Make basic Kafka directories, and make sure that their ownership is correct
+  """
+  import params
+  base_dirs = [params.kafka_log_dir, params.kafka_pid_dir, params.conf_dir]
+  Directory(base_dirs[:],  # Todo: remove list copy when AMBARI-14373 is fixed
+            mode=0755,
+            cd_access='a',
+            owner=params.kafka_user,
+            group=params.user_group,
+            create_parents=True
+            )
+  set_dir_ownership(base_dirs)
+
+def set_dir_ownership(targets):
+  import params
+  if isinstance(targets, collections.Iterable):
+    directories = targets
+  else:  # If target is a single object, convert it to list
+    directories = [targets]
+  for directory in directories:
+    # If path is empty or a single slash,
+    # may corrupt filesystem permissions
+    if len(directory) > 1:
+      Execute(('chown', '-R', format("{kafka_user}:{user_group}"), directory),
+            sudo=True)
+    else:
+      Logger.warning("Permissions for the folder \"%s\" were not updated due to "
+            "empty path passed: " % directory)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka_broker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka_broker.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka_broker.py
new file mode 100755
index 0000000..8799658
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka_broker.py
@@ -0,0 +1,111 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import Direction
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
+from resource_management import Script
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute, File, Directory
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+from kafka import ensure_base_directories
+
+import sys
+import upgrade
+
+from kafka import kafka
+
+class KafkaBroker(Script):
+
+  def get_component_name(self):
+    return "kafka-broker"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    kafka(upgrade_type=upgrade_type)
+
+  '''def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "kafka-broker")'''
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.1.0.0') >= 0:
+      stack_select.select("kafka-broker", params.version)
+
+    if params.version and compare_versions(format_stack_version(params.version), '4.1.0.0') >= 0:
+      conf_select.select(params.stack_name, "kafka", params.version)
+
+    # This is extremely important since it should only be called if crossing the IOP 4.2 boundary.
+    if params.current_version and params.version and params.upgrade_direction:
+      src_version = dst_version = None
+      if params.upgrade_direction == Direction.UPGRADE:
+        src_version = format_stack_version(params.current_version)
+        dst_version = format_stack_version(params.version)
+      else:
+        # These represent the original values during the UPGRADE direction
+        src_version = format_stack_version(params.version)
+        dst_version = format_stack_version(params.downgrade_from_version)
+
+      if compare_versions(src_version, '4.2.0.0') < 0 and compare_versions(dst_version, '4.2.0.0') >= 0:
+        # Upgrade from IOP 4.1 to 4.2, Calling the acl migration script requires the configs to be present.
+        self.configure(env, upgrade_type=upgrade_type)
+        upgrade.run_migration(env, upgrade_type)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env, upgrade_type=upgrade_type)
+    daemon_cmd = format('source {params.conf_dir}/kafka-env.sh ; {params.kafka_bin} start')
+    no_op_test = format('ls {params.kafka_pid_file} >/dev/null 2>&1 && ps -p `cat {params.kafka_pid_file}` >/dev/null 2>&1')
+    Execute(daemon_cmd,
+            user=params.kafka_user,
+            not_if=no_op_test
+    )
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    ensure_base_directories()
+    daemon_cmd = format('source {params.conf_dir}/kafka-env.sh; {params.kafka_bin} stop')
+    Execute(daemon_cmd,
+            user=params.kafka_user,
+    )
+    File (params.kafka_pid_file,
+          action = "delete"
+    )
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.kafka_pid_file)
+
+if __name__ == "__main__":
+  KafkaBroker().execute()
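
The version checks in pre_upgrade_restart() only trigger the ACL migration when an upgrade crosses the IOP 4.2.0.0 boundary. A hedged sketch of that gate, with a plain tuple comparison standing in for resource_management's compare_versions/format_stack_version:

def crosses_42_boundary(src_version, dst_version):
    def as_tuple(v):
        return tuple(int(part) for part in v.split("."))
    boundary = as_tuple("4.2.0.0")
    return as_tuple(src_version) < boundary <= as_tuple(dst_version)

print(crosses_42_boundary("4.1.0.0", "4.2.5.0"))  # True: migration should run
print(crosses_42_boundary("4.2.0.0", "4.2.5.0"))  # False: already past 4.2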

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka_upgrade.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka_upgrade.py
new file mode 100755
index 0000000..734e9ec
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/KAFKA/package/scripts/kafka_upgrade.py
@@ -0,0 +1,38 @@
+
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management import *
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions import Direction
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.script.script import Script
+
+class KafkaUpgrade(Script):
+
+  def copy_kerberos_param(self,env):
+    import params
+    kafka_run_path = "/usr/iop/4.1.0.0/kafka/bin/kafka-run-class.sh"
+    if params.upgrade_direction is not None and params.upgrade_direction == Direction.UPGRADE:
+      Execute(("sed", "-i", "s/\$CLASSPATH \$KAFKA_OPTS/\$CLASSPATH \$KAFKA_OPTS \$KAFKA_KERBEROS_PARAMS/", kafka_run_path), logoutput=True)
+
+if __name__ == "__main__":
+  KafkaUpgrade().execute()
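
The sed expression in copy_kerberos_param() appends $KAFKA_KERBEROS_PARAMS to the broker launch line in kafka-run-class.sh so that the Kerberos options exported from kafka-env.sh reach the JVM. The before/after line below is reconstructed for illustration only and is not copied from the IOP package:

launch_line = 'exec $JAVA ... -cp $CLASSPATH $KAFKA_OPTS "$@"'  # reconstructed, not verbatim
patched = launch_line.replace("$CLASSPATH $KAFKA_OPTS",
                              "$CLASSPATH $KAFKA_OPTS $KAFKA_KERBEROS_PARAMS")
print(patched)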


[50/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

Posted by al...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/hook.py
new file mode 100755
index 0000000..ad7144c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/hook.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.hook import Hook
+from shared_initialization import link_configs
+from shared_initialization import setup_config
+from shared_initialization import setup_iop_install_directory
+from resource_management.libraries.script import Script
+
+class AfterInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_iop_install_directory()
+    setup_config()
+
+    link_configs(self.stroutfile)
+
+if __name__ == "__main__":
+  AfterInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py
new file mode 100755
index 0000000..d3332db
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/params.py
@@ -0,0 +1,88 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.version import format_stack_version
+
+from resource_management.core.system import System
+from ambari_commons.os_check import OSCheck
+
+
+config = Script.get_config()
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+# default hadoop params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+
+# IOP 4.0+ params
+if Script.is_stack_greater_or_equal("4.0"):
+  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
+  # not supported in IOP 4.0+
+  hadoop_conf_empty_dir = None
+
+versioned_iop_root = '/usr/iop/current'
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#users and groups
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['cluster-env']['user_group']
+
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+has_namenode = not len(namenode_host) == 0
+
+if has_namenode:
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/shared_initialization.py
new file mode 100755
index 0000000..0d68abc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,89 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import shutil
+
+import ambari_simplejson as json
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.core.shell import as_sudo
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.script import Script
+
+def setup_iop_install_directory():
+  # This is a name of marker file.
+  SELECT_ALL_PERFORMED_MARKER = "/var/lib/ambari-agent/data/iop-select-set-all.performed"
+
+  import params
+  if params.iop_stack_version != "" and compare_versions(params.iop_stack_version, '4.0') >= 0:
+    Execute(as_sudo(['touch', SELECT_ALL_PERFORMED_MARKER]) + ' ; ' +
+                   format('{sudo} /usr/bin/iop-select set all `ambari-python-wrap /usr/bin/iop-select versions | grep ^{stack_version_unformatted} | tail -1`'),
+            only_if=format('ls -d /usr/iop/{stack_version_unformatted}*'),   # If any IOP version is installed
+            not_if=format("test -f {SELECT_ALL_PERFORMED_MARKER}")           # Do that only once (otherwise we break rolling upgrade logic)
+    )
+
+def setup_config():
+  import params
+  if params.has_namenode:
+    # create core-site only if the hadoop config directory exists
+    XmlConfig("core-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['core-site'],
+              configuration_attributes=params.config['configuration_attributes']['core-site'],
+              owner=params.hdfs_user,
+              group=params.user_group,
+              only_if=format("ls {hadoop_conf_dir}"))
+
+
+def load_version(struct_out_file):
+  """
+  Load version from file.  Made a separate method for testing
+  """
+  json_version = None
+  try:
+    if os.path.exists(struct_out_file):
+      with open(struct_out_file, 'r') as fp:
+        json_info = json.load(fp)
+        json_version = json_info['version']
+  except:
+    pass
+
+  return json_version
+  
+
+def link_configs(struct_out_file):
+  """
+  Links configs, only on a fresh install of BigInsights-4.1 and higher
+  """
+
+  if not Script.is_stack_greater_or_equal("4.1"):
+    Logger.info("Can only link configs for BigInsights-4.1 and higher.")
+    return
+
+  json_version = load_version(struct_out_file)
+
+  if not json_version:
+    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
+    return
+
+  for k, v in conf_select.get_package_dirs().iteritems():
+    conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file
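
load_version() above reads the agent's structured-output JSON and returns its "version" key. A minimal example of the kind of file it expects; the path and version string are made up:

import json, os, tempfile

path = os.path.join(tempfile.gettempdir(), "structured-out.json")
with open(path, "w") as fp:
    json.dump({"version": "4.2.5.0-0000"}, fp)

with open(path) as fp:
    print(json.load(fp).get("version"))  # 4.2.5.0-0000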

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/files/changeToSecureUid.sh
new file mode 100755
index 0000000..af632b2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/files/changeToSecureUid.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+username=$1
+directories=$2
+newUid=$3
+
+function find_available_uid() {
+ for ((i=1001; i<=2000; i++))
+ do
+   cut -d: -f3 /etc/passwd | grep -q "^${i}$"
+   if [ "$?" -ne 0 ]
+   then
+    newUid=$i
+    break
+   fi
+ done
+}
+
+if [ -z "$2" ]; then
+  test $(id -u ${username} 2>/dev/null)
+  if [ $? -ne 1 ]; then
+   newUid=`id -u ${username}`
+  else
+   find_available_uid 
+  fi
+  echo $newUid
+  exit 0
+fi
+
+if [ "${newUid:-0}" -eq 0 ]
+then
+  echo "Failed to find an available uid between 1001 and 2000"
+  exit 1
+fi
+
+
+set -e
+dir_array=($(echo $directories | sed 's/,/\n/g'))
+old_uid=$(id -u $username)
+sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
+echo "Changing uid of $username from $old_uid to $newUid"
+echo "Changing directory permisions for ${dir_array[@]}"
+$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
+exit 0
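
For clarity, the free-UID scan above restated in Python; the 1001-2000 range
mirrors the script, and inspecting only the third colon-delimited field of
/etc/passwd is what the grep over the UID column achieves:

    def find_available_uid(passwd_path="/etc/passwd", low=1001, high=2000):
        used = set()
        with open(passwd_path) as fp:
            for line in fp:
                fields = line.split(":")
                if len(fields) > 2 and fields[2].isdigit():
                    used.add(int(fields[2]))  # third field is the numeric UID
        for uid in range(low, high + 1):
            if uid not in used:
                return uid
        return None  # nothing free in the range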

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/hook.py
new file mode 100755
index 0000000..1926f48
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/hook.py
@@ -0,0 +1,36 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from shared_initialization import *
+
+class BeforeAnyHook(Hook):
+
+  def hook(self, env):
+    import params
+    env.set_params(params)
+    
+    setup_users()
+    if params.has_namenode:
+      setup_hadoop_env()
+    setup_java()
+
+if __name__ == "__main__":
+  BeforeAnyHook().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py
new file mode 100755
index 0000000..5ffd28c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/params.py
@@ -0,0 +1,226 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import collections
+import re
+import os
+
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version import compare_versions
+from ambari_commons.os_check import OSCheck
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = config['hostLevelParams']['java_home']
+java_version = int(config['hostLevelParams']['java_version'])
+jdk_location = config['hostLevelParams']['jdk_location']
+
+sudo = AMBARI_SUDO_BINARY
+
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+restart_type = default("/commandParams/restart_type", "")
+version = default("/commandParams/version", None)
+# Handle upgrade and downgrade
+if (restart_type.lower() == "rolling_upgrade" or restart_type.lower() == "nonrolling_upgrade") and version:
+  iop_stack_version = format_stack_version(version)
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+
+# Some datanode settings
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+secure_dn_ports_are_in_use = False
+
+def get_port(address):
+  """
+  Extracts the port from an address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None:
+    return int(m.group(2))
+  else:
+    return None
+
+def is_secure_port(port):
+  """
+  Returns True if the port is a privileged (root-owned) port on *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False
+
+# hadoop default params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+
+# upgrades would cause these directories to have a version instead of "current"
+# which would cause a lot of problems when writing out hadoop-env.sh; instead
+# force the use of "current" in the hook
+hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
+
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+hadoop_secure_dn_user = hdfs_user
+hadoop_dir = "/etc/hadoop"
+versioned_iop_root = '/usr/iop/current'
+hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
+datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
+is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
+
+
+if Script.is_stack_greater_or_equal("4.0"):
+  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
+  
+  # not supported in IOP 4.0+
+  hadoop_conf_empty_dir = None
+
+  if not security_enabled:
+    hadoop_secure_dn_user = '""'
+  else:
+    dfs_dn_port = get_port(dfs_dn_addr)
+    dfs_dn_http_port = get_port(dfs_dn_http_addr)
+    dfs_dn_https_port = get_port(dfs_dn_https_addr)
+    # Avoid a datanode that cannot start as a plain user because root-owned (privileged) ports are configured
+    if dfs_http_policy == "HTTPS_ONLY":
+      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
+    elif dfs_http_policy == "HTTP_AND_HTTPS":
+      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
+    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
+    if secure_dn_ports_are_in_use:
+      hadoop_secure_dn_user = hdfs_user
+    else:
+      hadoop_secure_dn_user = '""'
+
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = None #config['configurations']['tez-env']["tez_user"]
+oozie_user = config['configurations']['oozie-env']["oozie_user"]
+falcon_user = config['configurations']['falcon-env']["falcon_user"]
+ranger_user = config['configurations']['ranger-env']["ranger_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+
+has_namenode = not len(namenode_host) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_tez = False #'tez-site' in config['configurations']
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+has_falcon_server_hosts = not len(falcon_server_hosts) == 0
+has_ranger_admin = not len(ranger_admin_hosts) == 0
+
+
+if has_namenode:
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+ranger_group = config['configurations']['ranger-env']['ranger_group']
+dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
+
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)
+
+user_to_groups_dict = collections.defaultdict(lambda:[user_group])
+user_to_groups_dict[smoke_user] = [proxyuser_group]
+if has_ganglia_server:
+  user_to_groups_dict[gmond_user] = [gmond_user]
+  user_to_groups_dict[gmetad_user] = [gmetad_user]
+if has_tez:
+  user_to_groups_dict[tez_user] = [proxyuser_group]
+if has_oozie_server:
+  user_to_groups_dict[oozie_user] = [proxyuser_group]
+if has_falcon_server_hosts:
+  user_to_groups_dict[falcon_user] = [proxyuser_group]  
+if has_ranger_admin:
+  user_to_groups_dict[ranger_user] = [ranger_group]
+
+user_to_gid_dict = collections.defaultdict(lambda:user_group)
+
+user_list = json.loads(config['hostLevelParams']['user_list'])
+group_list = json.loads(config['hostLevelParams']['group_list'])
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
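
A quick illustration of the get_port helper defined above; the sample addresses
are invented, but the regex is the one used by the hook:

    import re

    def get_port(address):
        if address is None:
            return None
        m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
        return int(m.group(2)) if m is not None else None

    assert get_port("0.0.0.0:1019") == 1019                    # plain host:port
    assert get_port("https://dn1.example.com:50475") == 50475  # scheme stripped
    assert get_port("no-port-configured") is None              # no match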

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py
new file mode 100755
index 0000000..1c98fb8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -0,0 +1,242 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import getpass
+from copy import copy
+from resource_management.libraries.functions.version import compare_versions
+from resource_management import *
+
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+
+  if not params.host_sys_prepped and not params.ignore_groupsusers_create:
+    for group in params.group_list:
+      Group(group,
+      )
+
+
+    for user in params.user_list:
+      if params.override_uid == "true":
+        User(user,
+            uid = get_uid(user),
+            gid = params.user_to_gid_dict[user],
+            groups = params.user_to_groups_dict[user],
+        )
+      else:
+        User(user,
+            gid = params.user_to_gid_dict[user],
+            groups = params.user_to_groups_dict[user],
+        )
+
+    if params.override_uid == "true":
+      set_uid(params.smoke_user, params.smoke_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for smoke user as override_uid is not enabled')
+  else:
+    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
+    pass
+
+
+  if params.has_hbase_masters:
+    Directory (params.hbase_tmp_dir,
+               owner = params.hbase_user,
+               mode=0775,
+               create_parents = True,
+               cd_access="a",
+    )
+    if not params.host_sys_prepped and params.override_uid == "true":
+      set_uid(params.hbase_user, params.hbase_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for hbase user as host is sys prepped or override_uid is not enabled')
+      pass
+
+  if not params.host_sys_prepped:
+    if params.has_namenode:
+      create_dfs_cluster_admins()
+  else:
+    Logger.info('Skipping setting dfs cluster admin as host is sys prepped')
+
+
+def create_dfs_cluster_admins():
+  """
+  dfs.cluster.administrators supports the format: <comma-delimited list of usernames><space><comma-delimited list of group names>
+  """
+  import params
+
+  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
+
+  User(params.hdfs_user,
+    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
+    ignore_failures = params.ignore_groupsusers_create
+  )
+
+def create_users_and_groups(user_and_groups):
+  import params
+
+  parts = re.split(r'\s', user_and_groups)
+  if len(parts) == 1:
+    parts.append("")
+
+  users_list = parts[0].split(",") if parts[0] else []
+  groups_list = parts[1].split(",") if parts[1] else []
+
+  if users_list:
+    User(users_list,
+         ignore_failures = params.ignore_groupsusers_create
+    )
+
+  if groups_list:
+    Group(copy(groups_list),
+          ignore_failures = params.ignore_groupsusers_create
+    )
+  return groups_list
+    
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma separated directories
+  """
+  import params
+
+  File(format("{tmp_dir}/changeUid.sh"),
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
+  uid = get_uid(user)
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {uid}"),
+          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
+
+def get_uid(user):
+  import params
+  import commands
+  user_str = str(user) + "_uid"
+  service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
+  
+  if service_env and params.config['configurations'][service_env[0]][user_str]:
+    service_env_str = str(service_env[0])
+    uid = params.config['configurations'][service_env_str][user_str]
+    if len(service_env) > 1:
+      Logger.warning("Multiple values found for %s, using %s"  % (user_str, uid))
+    return uid 
+  else:
+    if user == params.smoke_user:
+      return 0
+    File(format("{tmp_dir}/changeUid.sh"),
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+    ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
+    newUid=commands.getoutput(format("{tmp_dir}/changeUid.sh {user}"))
+    return newUid
+    
+def setup_hadoop_env():
+  import params
+  if params.has_namenode:
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+
+    Directory(params.hadoop_dir, mode=0755)
+
+    # IOP < 4.0 used a conf -> conf.empty symlink for /etc/hadoop/
+    if Script.is_stack_less_than("4.0"):
+      Directory(params.hadoop_conf_empty_dir,
+              create_parents = True,
+              owner = 'root',
+              group = params.user_group
+      )
+      Link(params.hadoop_conf_dir,
+         to=params.hadoop_conf_empty_dir,
+         not_if=format("ls {hadoop_conf_dir}")
+      )
+      
+    # write out hadoop-env.sh, but only if the directory exists
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
+         owner=tc_owner, group=params.user_group,
+         content=InlineTemplate(params.hadoop_env_sh_template)
+      )
+      
+    # Create tmp dir for java.io.tmpdir
+    # Handle a situation when /tmp is set to noexec
+    Directory(params.hadoop_java_io_tmpdir,
+              owner=params.hdfs_user,
+              group=params.user_group,
+              mode=0777
+    )
+  
+def setup_java():
+  """
+  Installs the JDK using params that come from the Ambari server
+  """
+  import params
+
+  java_exec = format("{java_home}/bin/java")
+
+  if not os.path.isfile(java_exec):
+
+    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    java_dir = os.path.dirname(params.java_home)
+    tmp_java_dir = format("{tmp_dir}/jdk")
+
+    if not params.jdk_name:
+      return
+
+    Directory(params.artifact_dir,
+              create_parents = True,
+              )
+
+    File(jdk_curl_target,
+         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         not_if = format("test -f {jdk_curl_target}")
+    )
+
+    if params.jdk_name.endswith(".bin"):
+      chmod_cmd = ("chmod", "+x", jdk_curl_target)
+      install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+    elif params.jdk_name.endswith(".gz"):
+      chmod_cmd = ("chmod","a+x", java_dir)
+      install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+
+    Directory(java_dir
+    )
+
+    Execute(chmod_cmd,
+            sudo = True,
+            )
+
+    Execute(install_cmd,
+            )
+
+    File(format("{java_home}/bin/java"),
+         mode=0755,
+         cd_access="a",
+         )
+
+    Execute(("chgrp","-R", params.user_group, params.java_home),
+            sudo = True,
+            )
+    Execute(("chown","-R", getpass.getuser(), params.java_home),
+            sudo = True,
+            )
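
The parsing contract of create_users_and_groups above, shown standalone; the
input value is illustrative and follows the <users><space><groups> format the
docstring describes:

    import re

    def parse_users_and_groups(value):
        parts = re.split(r'\s', value)
        if len(parts) == 1:
            parts.append("")              # no group half present
        users = parts[0].split(",") if parts[0] else []
        groups = parts[1].split(",") if parts[1] else []
        return users, groups

    print parse_users_and_groups("hdfs,admin hadoop,supergroup")
    # (['hdfs', 'admin'], ['hadoop', 'supergroup'])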

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/hook.py
new file mode 100755
index 0000000..ce17776
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/hook.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+from repo_initialization import *
+
+class BeforeInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+    
+    install_repos()
+    install_packages()
+
+if __name__ == "__main__":
+  BeforeInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/params.py
new file mode 100755
index 0000000..18ce5af
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/params.py
@@ -0,0 +1,111 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.core.system import System
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import default, format
+
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = None #config['configurations']['tez-env']["tez_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+
+# repo templates
+repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
+repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
+
+has_sqoop_client = 'sqoop-env' in config['configurations']
+has_namenode = not len(namenode_host) == 0
+has_hs = not len(hs_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_storm_server = not len(storm_server_hosts) == 0
+has_falcon_server = not len(falcon_host) == 0
+has_tez = False #'tez-site' in config['configurations']
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/repo_initialization.py
new file mode 100755
index 0000000..1b0f5c6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/repo_initialization.py
@@ -0,0 +1,90 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+from ambari_commons.os_check import OSCheck
+import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
+
+# components_list = repoName + postfix
+_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
+REPO_FILE_NAME_PREFIX = 'IOP-'
+STACK_TO_ROOT_FOLDER = {"IOP": "/usr/iop", "BIGINSIGHTS":"/usr/iop"}
+
+def _alter_repo(action, repo_string, repo_template):
+  """
+  @param action: "delete" or "create"
+  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
+  """
+  repo_dicts = json.loads(repo_string)
+
+  if not isinstance(repo_dicts, list):
+    repo_dicts = [repo_dicts]
+
+  for repo in repo_dicts:
+    if not 'baseUrl' in repo:
+      repo['baseUrl'] = None
+    if not 'mirrorsList' in repo:
+      repo['mirrorsList'] = None
+    
+    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
+    
+    Repository(repo['repoId'],
+               action = action,
+               base_url = repo['baseUrl'],
+               mirror_list = repo['mirrorsList'],
+               repo_file_name = repo['repoName'],
+               repo_template = repo_template,
+               components = ubuntu_components, # ubuntu specific
+    )
+
+    if action == "create":
+      # Attempt to clean the package cache for the given repo
+      repo_id = repo['repoId']
+      print "Cleaning cache for " + repo_id + "; file: " + repo['repoName']
+      current_repo_ids = []
+      current_repo_files = set() 
+      
+      if OSCheck.is_ubuntu_family():
+        current_repo_files.add("base")
+        current_repo_files.add(repo['repoName'])
+      elif OSCheck.is_suse_family():
+        current_repo_ids.append("base")
+        current_repo_ids.append(repo_id)
+      else:  
+        current_repo_ids.append(repo_id)
+      
+      Repository(repo_id,
+                 action = "clearcache",
+                 base_url = repo['baseUrl'],
+                 mirror_list = repo['mirrorsList'],
+                 repo_file_name = repo['repoName'],
+                 repo_template = repo_template,
+                 components = ubuntu_components,  # ubuntu specific
+                 use_repos=list(current_repo_files) if OSCheck.is_ubuntu_family() else current_repo_ids,
+                 skip_repos=["*"] if OSCheck.is_redhat_family() else []
+      )      
+
+def install_repos():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
+  _alter_repo("create", params.repo_info, template)
+  if params.service_repo_info:
+    _alter_repo("create", params.service_repo_info, template)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/shared_initialization.py
new file mode 100755
index 0000000..08748c9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,34 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+
+def install_packages():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  packages = ['unzip', 'curl']
+  if params.iop_stack_version != "" and compare_versions(params.iop_stack_version, '4.0') >= 0:
+    packages.append('iop-select')
+  Package(packages)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-RESTART/scripts/hook.py
new file mode 100755
index 0000000..14b9d99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-RESTART/scripts/hook.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class BeforeRestartHook(Hook):
+
+  def hook(self, env):
+    self.run_custom_hook('before-START')
+
+if __name__ == "__main__":
+  BeforeRestartHook().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/checkForFormat.sh
new file mode 100755
index 0000000..68aa96d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/checkForFormat.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export bin_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
+  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
+    (( EXIT_CODE = $EXIT_CODE | $? ))
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
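
The decision checkForFormat.sh above makes, restated in Python: formatting is
allowed only when every configured NameNode directory is absent or empty (the
directory list below is illustrative):

    import os

    def non_empty_name_dirs(name_dirs):
        # name_dirs is the comma-separated dfs.namenode.name.dir value.
        dirs = [d.strip() for d in name_dirs.split(",") if d.strip()]
        return [d for d in dirs if os.path.isdir(d) and os.listdir(d)]

    # Mirrors the shell's EXIT_CODE: format only if this list comes back empty.
    print non_empty_name_dirs("/hadoop/hdfs/namenode,/mnt/disk1/namenode")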

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/fast-hdfs-resource.jar b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/fast-hdfs-resource.jar
new file mode 100755
index 0000000..6ea0873
Binary files /dev/null and b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/fast-hdfs-resource.jar differ

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/task-log4j.properties
new file mode 100755
index 0000000..7e12962
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,134 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use the following logger to send the job summary to a separate file, defined by
+# hadoop.mapreduce.jobsummary.log.file and rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+ 
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/topology_script.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/topology_script.py
new file mode 100755
index 0000000..0f7a55c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/files/topology_script.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import sys, os
+from string import join
+import ConfigParser
+
+
+DEFAULT_RACK = "/default-rack"
+DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
+SECTION_NAME = "network_topology"
+
+class TopologyScript():
+
+  def load_rack_map(self):
+    try:
+      #RACK_MAP contains both host name vs rack and ip vs rack mappings
+      mappings = ConfigParser.ConfigParser()
+      mappings.read(DATA_FILE_NAME)
+      return dict(mappings.items(SECTION_NAME))
+    except ConfigParser.NoSectionError:
+      return {}
+
+  def get_racks(self, rack_map, args):
+    if len(args) == 1:
+      return DEFAULT_RACK
+    else:
+      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]],)
+
+  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
+    #try looking up by hostname
+    rack = rack_map.get(hostname_or_ip)
+    if rack is not None:
+      return rack
+    #try looking up by ip
+    rack = rack_map.get(self.extract_ip(hostname_or_ip))
+    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
+    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
+
+  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
+  def extract_ip(self, container_string):
+    return container_string.split("/")[0].split(":")[0]
+
+  def execute(self, args):
+    rack_map = self.load_rack_map()
+    rack = self.get_racks(rack_map, args)
+    print rack
+
+if __name__ == "__main__":
+  TopologyScript().execute(sys.argv)
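
topology_script.py above is the script Hadoop invokes via
net.topology.script.file.name: it receives host names or IPs as arguments and
prints the matching rack paths, falling back to /default-rack for hosts absent
from topology_mappings.data. An illustrative run (hosts and racks invented):

    $ python topology_script.py 192.168.1.10 dn1.example.com
    /rack-01 /rack-02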

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/hook.py
new file mode 100755
index 0000000..c4854ae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/hook.py
@@ -0,0 +1,40 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from rack_awareness import create_topology_script_and_mapping
+from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
+
+
+class BeforeStartHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+
+    setup_hadoop()
+    setup_configs()
+    create_javahome_symlink()
+    create_topology_script_and_mapping()	
+
+if __name__ == "__main__":
+  BeforeStartHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py
new file mode 100755
index 0000000..5c84a05
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/params.py
@@ -0,0 +1,211 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.script.script import Script
+
+config = Script.get_config()
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+# hadoop default params
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_home = '/usr'
+create_lib_snappy_symlinks = True
+
+# IOP 4.0+ params
+if Script.is_stack_greater_or_equal("4.0"):
+  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
+  hadoop_home = stack_select.get_hadoop_dir("home")
+  create_lib_snappy_symlinks = False
+
+
+current_service = config['serviceName']
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#users and groups
+has_hadoop_env = 'hadoop-env' in config['configurations']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+
+has_namenode = not len(namenode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_metric_collector = not len(ams_collector_hosts) == 0
+
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
+    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
+  else:
+    metric_collector_host = ams_collector_hosts[0]
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  pass
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
+
+#hadoop params
+
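+# hadoop_conf_dir and task_log4j_properties_location are only defined on
+# clusters that have a NameNode; consumers are expected to guard on has_namenode.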
+if has_namenode:
+  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+  
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+
+
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+ambari_server_resources = config['hostLevelParams']['jdk_location']
+oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
+mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
+
+if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
+  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
+else:
+  rca_enabled = False
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+
+#log4j.properties
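+# yarn-log4j content is appended only when hdfs-log4j content is also present;
+# otherwise log4j_props stays None and no log4j content is written out.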
+if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
+  log4j_props = config['configurations']['hdfs-log4j']['content']
+  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
+    log4j_props += config['configurations']['yarn-log4j']['content']
+else:
+  log4j_props = None
+
+refresh_topology = False
+command_params = config["commandParams"] if "commandParams" in config else None
+if command_params is not None:
+  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+  
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+#host info
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+
+#topology files
+net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
+net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
+net_topology_mapping_data_file_name = 'topology_mappings.data'
+net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/rack_awareness.py
new file mode 100755
index 0000000..9b28d33
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/rack_awareness.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+from resource_management.core.resources import File, Directory
+from resource_management.core.source import StaticFile, Template
+from resource_management.libraries.functions import format
+
+import os
+
+def create_topology_mapping():
+  import params
+  
+  path=params.net_topology_mapping_data_file_path 
+  parent_dir=os.path.dirname(path) 
+  # only create the parent directory and set its permission if it does not exist
+  if not os.path.exists(parent_dir): 
+    Directory(parent_dir, 
+              create_parents = True,
+              owner=params.hdfs_user, 
+              group=params.user_group) 
+
+  # placing the mappings file in the same folder where the topology script is located
+  File(path,
+       content=Template("topology_mappings.data.j2"),
+       owner=params.hdfs_user,
+       group=params.user_group,
+       # if there are no hadoop components, don't create the mappings file
+       only_if=format("test -d {net_topology_script_dir}"),
+  )
+
+def create_topology_script():
+  import params
+
+  path=params.net_topology_script_file_path
+  parent_dir=os.path.dirname(path) 
+  # only create the parent directory and set its permission if it does not exist 
+  if not os.path.exists(parent_dir): 
+    Directory(parent_dir, 
+              create_parents = True,
+              owner=params.hdfs_user, 
+              group=params.user_group) 
+
+  # installing the topology script to the specified location
+  File(path,
+       content=StaticFile('topology_script.py'),
+       mode=0755,
+       only_if=format("test -d {net_topology_script_dir}"),
+  )
+
+  
+def create_topology_script_and_mapping():
+  import params
+  if params.has_hadoop_env:
+    create_topology_mapping()
+    create_topology_script()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/shared_initialization.py
new file mode 100755
index 0000000..3922231
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,152 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+
+from resource_management import *
+
+def setup_hadoop():
+  """
+  Set up Hadoop files and directories
+  """
+  import params
+
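+  # Switch SELinux to permissive mode; only attempted when /selinux/enforce
+  # exists (only_if) and skipped when getenforce is unavailable or SELinux is
+  # already Disabled (not_if).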
+  Execute(("setenforce","0"),
+          only_if="test -f /selinux/enforce",
+          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
+          sudo=True,
+  )
+
+  #directories
+  if params.has_namenode:
+    Directory(params.hdfs_log_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group=params.user_group,
+              mode=0775,
+              cd_access='a',
+    )
+    Directory(params.hadoop_pid_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group='root',
+              cd_access='a',
+    )
+    Directory(params.hadoop_tmp_dir,
+              create_parents = True,
+              owner=params.hdfs_user,
+              cd_access='a',
+              )
+  #files
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+      
+    # If WebHDFS is not enabled, this jar is needed to create hadoop folders.
+    if not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+      # the source code for this jar is under contrib/fast-hdfs-resource
+      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
+           mode=0644,
+           content=StaticFile("fast-hdfs-resource.jar")
+      )
+      
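+    # Only lay down commons-logging, health_check, log4j and metrics2 config
+    # files when the hadoop conf directory already exists on this host.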
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
+           mode=0644,
+           group=params.user_group,
+           owner=tc_owner,
+           content=Template('commons-logging.properties.j2')
+      )
+
+      health_check_template_name = "health_check"
+      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
+           owner=tc_owner,
+           content=Template(health_check_template_name + ".j2")
+      )
+
+      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+      if (params.log4j_props != None):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+             content=params.log4j_props
+        )
+      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+        )
+
+      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+           owner=params.hdfs_user,
+           content=Template("hadoop-metrics2.properties.j2")
+      )
+
+
+def setup_configs():
+  """
+  Creates configuration files for the HDFS and MapReduce services
+  """
+  import params
+
+  if params.has_namenode:
+    if os.path.exists(params.hadoop_conf_dir):
+      File(params.task_log4j_properties_location,
+           content=StaticFile("task-log4j.properties"),
+           mode=0755
+      )
+
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+      File(os.path.join(params.hadoop_conf_dir, 'masters'),
+                owner=params.hdfs_user,
+                group=params.user_group
+      )
+
+  generate_include_file()
+
+
+def generate_include_file():
+  import params
+
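+  # Write the dfs.hosts include file from the include_hosts_list.j2 template,
+  # only when a NameNode exists, dfs.hosts is configured and slave hosts are present.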
+  if params.has_namenode and params.dfs_hosts and params.has_slaves:
+    include_hosts_list = params.slave_hosts
+    File(params.dfs_hosts,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+def create_javahome_symlink():
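+  # If the JDK exists under /usr/jdk but not under /usr/jdk64, create the
+  # /usr/jdk64 directory and a symlink so both paths resolve to the same JDK.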
+  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
+    Directory("/usr/jdk64/",
+         create_parents = True,
+    )
+    Link("/usr/jdk64/jdk1.6.0_31",
+         to="/usr/jdk/jdk1.6.0_31",
+    )
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/commons-logging.properties.j2
new file mode 100755
index 0000000..2197ba5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,43 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100755
index 0000000..1adba80
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}