Posted to commits@ambari.apache.org by al...@apache.org on 2017/06/28 00:24:47 UTC

[49/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights Stack Skeleton in Ambari 2.5 (alejandro)

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100755
index 0000000..31bf1c6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,88 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
+supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}
+{% if has_metric_collector %}
+
+*.period={{metrics_collection_period}}
+*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+*.sink.timeline.period={{metrics_collection_period}}
+*.sink.timeline.sendInterval={{metrics_report_interval}}000
+*.sink.timeline.slave.host.name = {{hostname}}
+
+datanode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+namenode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+resourcemanager.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+nodemanager.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+historyserver.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+journalnode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+nimbus.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+supervisor.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+maptask.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+reducetask.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
+
+resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
+
+{% endif %}
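
Note: the template above is rendered by the before-START hook with values supplied by the stack's params script. As a quick illustration of how the Jinja2 variables resolve, here is a minimal Python sketch that renders just the timeline-sink fragment; the host, port, and intervals below are example values, not values from this commit:

    # Minimal rendering sketch; assumes the jinja2 package is available.
    import textwrap
    from jinja2 import Template

    fragment = textwrap.dedent("""\
        *.period={{metrics_collection_period}}
        *.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
        *.sink.timeline.period={{metrics_collection_period}}
        *.sink.timeline.sendInterval={{metrics_report_interval}}000
        datanode.sink.timeline.collector={{metric_collector_host}}:{{metric_collector_port}}
        """)

    context = {
        "metrics_collection_period": 60,     # seconds (example)
        "metrics_report_interval": 60,       # seconds; template appends "000" for ms
        "metric_collector_host": "collector.example.com",
        "metric_collector_port": 6188,
    }

    print(Template(fragment).render(**context))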

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/health_check.j2
new file mode 100755
index 0000000..0a03d17
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/health_check.j2
@@ -0,0 +1,81 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+# Run all checks
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0
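
The check_disks function above flags any ext3 filesystem listed in /etc/fstab that is either not mounted (except /mnt) or mounted read-only according to /proc/mounts. A rough Python equivalent, shown only to make the awk logic explicit (paths assume a Linux host), might look like this:

    # Rough equivalent of check_disks; mirrors the awk expressions above.
    def check_disks():
        fstab_ext3 = [f[1] for f in (line.split() for line in open("/etc/fstab"))
                      if len(f) >= 3 and "ext3" in f[2]]
        mounts = {f[1]: f[3] for f in (line.split() for line in open("/proc/mounts"))
                  if len(f) >= 4}
        problems = []
        for mountpoint in fstab_ext3:
            if mountpoint not in mounts and mountpoint != "/mnt":
                problems.append(mountpoint + "(u)")       # in fstab but not mounted
            elif mountpoint in mounts and mounts[mountpoint].startswith("ro,"):
                problems.append(mountpoint + "(ro)")      # mounted read-only
        return (0, "disks ok") if not problems else (2, " ".join(problems))

    if __name__ == "__main__":
        exit_code, message = check_disks()
        print(message)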

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/include_hosts_list.j2
new file mode 100755
index 0000000..4a9e713
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/topology_mappings.data.j2
new file mode 100755
index 0000000..15034d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-START/templates/topology_mappings.data.j2
@@ -0,0 +1,24 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+    #
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[network_topology]
+{% for host in all_hosts %}
+{% if host in slave_hosts %}
+{{host}}={{all_racks[loop.index-1]}}
+{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
+{% endif %}
+{% endfor %}
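
The loop above writes one hostname=rack and one ip=rack entry per slave host under a [network_topology] section. For reference, a sketch of how a rack-awareness topology script might consume that file follows; the file path and default rack are assumptions for illustration, not values defined in this commit:

    # Sketch of a topology lookup against the generated mappings file.
    import sys
    from configparser import ConfigParser

    MAPPING_FILE = "/etc/hadoop/conf/topology_mappings.data"   # assumed location
    DEFAULT_RACK = "/default-rack"                              # assumed fallback

    parser = ConfigParser()
    parser.read(MAPPING_FILE)
    mappings = dict(parser.items("network_topology"))

    # Hadoop passes host names or IPs as arguments and expects one rack per line.
    for host in sys.argv[1:]:
        print(mappings.get(host, DEFAULT_RACK))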

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/kerberos.json
new file mode 100755
index 0000000..027c20b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/kerberos.json
@@ -0,0 +1,68 @@
+{
+  "properties": {
+    "realm": "${kerberos-env/realm}",
+    "keytab_dir": "/etc/security/keytabs"
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type" : "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    },
+    {
+      "name": "hdfs",
+      "principal": {
+        "value": "${hadoop-env/hdfs_user}@${realm}",
+        "type" : "user" ,
+        "configuration": "hadoop-env/hdfs_principal_name",
+        "local_username" : "${hadoop-env/hdfs_user}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/hdfs.headless.keytab",
+        "owner": {
+          "name": "${hadoop-env/hdfs_user}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "hadoop-env/hdfs_user_keytab"
+      }
+    },
+    {
+      "name": "smokeuser",
+      "principal": {
+        "value": "${cluster-env/smokeuser}@${realm}",
+        "type" : "user",
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username" : "${cluster-env/smokeuser}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "owner": {
+          "name": "${cluster-env/smokeuser}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "cluster-env/smokeuser_keytab"
+      }
+    }
+  ]
+}
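
The descriptor above relies on ${config-type/property} references (for example ${kerberos-env/realm} and ${cluster-env/user_group}) that Ambari substitutes from cluster configuration when Kerberos is enabled. A small sketch of that kind of substitution, using made-up example values, is:

    # Illustrative resolution of ${...} placeholders; config values are examples only.
    import re

    configs = {
        "kerberos-env/realm": "EXAMPLE.COM",
        "hadoop-env/hdfs_user": "hdfs",
        "cluster-env/smokeuser": "ambari-qa",
    }

    def resolve(value, configs, extra=None):
        variables = dict(configs, **(extra or {}))
        return re.sub(r"\$\{([^}]+)\}",
                      lambda m: variables.get(m.group(1), m.group(0)), value)

    realm = resolve("${kerberos-env/realm}", configs)
    principal = resolve("${hadoop-env/hdfs_user}@${realm}", configs, {"realm": realm})
    print(principal)   # -> hdfs@EXAMPLE.COM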

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/metainfo.xml
new file mode 100755
index 0000000..1f40439
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+	    <active>false</active>
+    </versions>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
new file mode 100755
index 0000000..4627e73
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_features.json
@@ -0,0 +1,212 @@
+{
+  "stack_features": [
+    {
+      "name": "snappy",
+      "description": "Snappy compressor/decompressor support",
+      "max_version": "4.0.0.0"
+    },
+    {
+      "name": "lzo",
+      "description": "LZO libraries support",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "express_upgrade",
+      "description": "Express upgrade support",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "rolling_upgrade",
+      "description": "Rolling upgrade support",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "config_versioning",
+      "description": "Configurable versions support",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "datanode_non_root",
+      "description": "DataNode running as non-root support (AMBARI-7615)",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "remove_ranger_hdfs_plugin_env",
+      "description": "HDFS removes Ranger env files (AMBARI-14299)",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "ranger",
+      "description": "Ranger Service support",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "ranger_tagsync_component",
+      "description": "Ranger Tagsync component support (AMBARI-14383)",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "phoenix",
+      "description": "Phoenix Service support",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "nfs",
+      "description": "NFS support",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "timeline_state_store",
+      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
+      "min_version": "4.0.0.0"
+    },
+    {
+      "name": "copy_tarball_to_hdfs",
+      "description": "Copy tarball to HDFS support (AMBARI-12113)",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "spark_16plus",
+      "description": "Spark 1.6+",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "spark_thriftserver",
+      "description": "Spark Thrift Server",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "create_kafka_broker_id",
+      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
+      "min_version": "4.0.0.0",
+      "max_version": "4.2.0.0"
+    },
+    {
+      "name": "kafka_listeners",
+      "description": "Kafka listeners (AMBARI-10984)",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "kafka_kerberos",
+      "description": "Kafka Kerberos support (AMBARI-10984)",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "ranger_usersync_non_root",
+      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
+      "min_version": "4.1.0.0"
+    },
+    {
+      "name": "ranger_audit_db_support",
+      "description": "Ranger Audit to DB support",
+      "min_version": "4.2.0.0",
+      "max_version": "4.2.0.0"
+    },
+    {
+      "name": "knox_versioned_data_dir",
+      "description": "Use versioned data dir for Knox (AMBARI-13164)",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "knox_sso_topology",
+      "description": "Knox SSO Topology support (AMBARI-13975)",
+      "min_version": "4.2.0.0"
+    },
+    {
+      "name": "oozie_admin_user",
+      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
+      "min_version": "4.0.0.0"
+    },
+    {
+      "name": "oozie_setup_shared_lib",
+      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
+      "min_version": "4.0.0.0"
+    },
+    {
+      "name": "oozie_host_kerberos",
+      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
+      "min_version": "4.0.0.0",
+      "max_version": "4.1.0.0"
+    },
+    {
+      "name": "hive_metastore_upgrade_schema",
+      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
+      "min_version": "4.1.0.0"
+     },
+    {
+      "name": "hive_server_interactive",
+      "description": "Hive server interactive support (AMBARI-15573)",
+      "min_version": "4.4.0.0"
+     },
+    {
+      "name": "hive_webhcat_specific_configs",
+      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
+      "min_version": "4.1.0.0"
+     },
+    {
+      "name": "hive_purge_table",
+      "description": "Hive purge table support (AMBARI-12260)",
+      "min_version": "4.1.0.0"
+     },
+    {
+      "name": "hive_server2_kerberized_env",
+      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
+      "max_version": "4.1.0.0"
+     },
+    {
+      "name": "hive_env_heapsize",
+      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
+      "min_version": "4.0.0.0"
+    },
+    {
+      "name": "ranger_kms_hsm_support",
+      "description": "Ranger KMS HSM support (AMBARI-15752)",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_log4j_support",
+      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_kerberos_support",
+      "description": "Ranger Kerberos support",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "hive_metastore_site_support",
+      "description": "Hive Metastore site support",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_usersync_password_jceks",
+      "description": "Saving Ranger Usersync credentials in jceks",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_install_logsearch_client",
+      "description": "LogSearch Service support",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "hbase_home_directory",
+      "description": "Hbase home directory in HDFS needed for HBASE backup",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "spark_livy",
+      "description": "Livy as slave component of spark",
+      "min_version": "4.4.0.0"
+    },
+    {
+      "name": "ranger_pid_support",
+      "description": "Ranger Service support pid generation AMBARI-16756",
+      "min_version": "4.2.5.0"
+    },
+    {
+      "name": "ranger_kms_pid_support",
+      "description": "Ranger KMS Service support pid generation",
+      "min_version": "4.2.5.0"
+    }
+  ]
+}
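
Each entry above gates a feature on the BigInsights stack version via optional min_version and max_version bounds. A sketch of the kind of check that might read this file follows; treating min_version as inclusive and max_version as exclusive is an assumption made here for illustration:

    # Sketch of a stack-feature check against stack_features.json.
    import json

    def version_tuple(v):
        return tuple(int(part) for part in v.split("."))

    def check_stack_feature(feature_name, stack_version, features):
        for feature in features["stack_features"]:
            if feature["name"] != feature_name:
                continue
            if "min_version" in feature and \
                    version_tuple(stack_version) < version_tuple(feature["min_version"]):
                return False
            if "max_version" in feature and \
                    version_tuple(stack_version) >= version_tuple(feature["max_version"]):
                return False
            return True
        return False

    with open("stack_features.json") as f:
        features = json.load(f)

    print(check_stack_feature("rolling_upgrade", "4.2.0.0", features))   # True
    print(check_stack_feature("snappy", "4.2.0.0", features))            # False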

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
new file mode 100755
index 0000000..fdbbdf9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/properties/stack_tools.json
@@ -0,0 +1,4 @@
+{
+  "stack_selector": ["iop-select", "/usr/bin/iop-select", "iop-select"],
+  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]
+}
\ No newline at end of file
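
Reading the three-element entries as (tool name, tool path, package name) is an assumption based on how similar stack_tools.json files are consumed elsewhere; a trivial sketch of unpacking them:

    # Sketch only; the (name, path, package) interpretation is assumed.
    import json

    with open("stack_tools.json") as f:
        tools = json.load(f)

    selector_name, selector_path, selector_package = tools["stack_selector"]
    print(selector_name, selector_path, selector_package)
    # -> iop-select /usr/bin/iop-select iop-select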

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/repos/repoinfo.xml
new file mode 100755
index 0000000..034d528
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/repos/repoinfo.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <!-- TODO define latest json file for iop 
+  <latest>http://s3.amazonaws.com/dev.hortonworks.com/HDP/hdp_urlinfo.json</latest>
+  -->
+  <mainrepoid>IOP-4.0</mainrepoid>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP/RHEL6/x86_64/4.0</baseurl>
+      <repoid>IOP-4.0</repoid>
+      <reponame>IOP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://ibm-open-platform.ibm.com/repos/IOP-UTILS/RHEL6/x86_64/1.0</baseurl>
+      <repoid>IOP-UTILS-1.0</repoid>
+      <reponame>IOP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/role_command_order.json
new file mode 100755
index 0000000..bbf1905
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/role_command_order.json
@@ -0,0 +1,70 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
+    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+    "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START","ZOOKEEPER_SERVER-START"],
+    "WEBHCAT_SERVER-START": ["NODEMANAGER-START", "HIVE_SERVER-START"],
+    "WEBHCAT_SERVER-RESTART": ["NODEMANAGER-RESTART", "HIVE_SERVER-RESTART"],
+    "HIVE_METASTORE-START": ["MYSQL_SERVER-START", "NAMENODE-START"],
+    "HIVE_METASTORE-RESTART": ["MYSQL_SERVER-RESTART", "NAMENODE-RESTART"],
+    "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
+    "HIVE_SERVER-RESTART": ["NODEMANAGER-RESTART", "MYSQL_SERVER-RESTART"],
+    "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
+    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
+    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
+    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
+    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+    "SLIDER_SERVICE_CHECK-SERVICE_CHECK" : ["NODEMANAGER-START", "RESOURCEMANAGER-START"],    
+    "SPARK_SERVICE_CHECK-SERVICE_CHECK" : ["SPARK_JOBHISTORYSERVER-START", "SPARK_THRIFTSERVER-START", "APP_TIMELINE_SERVER-START"],
+    "FLUME_SERVICE_CHECK-SERVICE_CHECK": ["FLUME_HANDLER-START"]
+  },
+  "_comment" : "GLUSTERFS-specific dependencies",
+  "optional_glusterfs": {
+    "HBASE_MASTER-START": ["PEERSTATUS-START"],
+    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"]
+  },
+  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+  "optional_no_glusterfs": {
+    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
+    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HIVE_SERVER-START": ["DATANODE-START"],
+    "WEBHCAT_SERVER-START": ["DATANODE-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
+        "SECONDARY_NAMENODE-START"],
+    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
+        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
+        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
+        "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "SPARK_JOBHISTORYSERVER-START" : ["NAMENODE-START"],
+    "SPARK_THRIFTSERVER-START" : ["NAMENODE-START", "HIVE_METASTORE-START"],
+    "APP_TIMELINE_SERVER-START": ["NAMENODE-START", "DATANODE-START"]
+  },
+  "_comment" : "Dependencies that are used in HA NameNode cluster",
+  "namenode_optional_ha": {
+    "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+    "ZKFC-START": ["ZOOKEEPER_SERVER-START"]
+  },
+  "_comment" : "Dependencies that are used in ResourceManager HA cluster",
+  "resourcemanager_optional_ha" : {
+    "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
+  }
+}
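
As the _comment keys state, each record maps a blocked role-command to the role-commands that must complete first. A small sketch that merges the general section with the non-GLUSTERFS section to list the blockers of one command (the section choice is just an example):

    # List what must finish before a given role-command may start.
    import json

    with open("role_command_order.json") as f:
        rco = json.load(f)

    def blockers(role_command, sections=("general_deps", "optional_no_glusterfs")):
        result = []
        for section in sections:
            for blocked, blocking in rco.get(section, {}).items():
                if blocked.startswith("_comment"):
                    continue
                if blocked == role_command:
                    result.extend(blocking)
        return result

    print(blockers("HIVE_SERVER-START"))
    # -> ['NODEMANAGER-START', 'MYSQL_SERVER-START', 'DATANODE-START']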

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json
new file mode 100755
index 0000000..221e585
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/alerts.json
@@ -0,0 +1,183 @@
+{
+  "AMBARI_METRICS": {
+    "service": [
+      {
+        "name": "metrics_monitor_process_percent",
+        "label": "Percent Metrics Monitors Available",
+        "description": "This alert is triggered if a percentage of Metrics Monitor processes are not up and listening on the network for the configured warning and critical thresholds.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "ams_metrics_monitor_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.1
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.3
+            }
+          }
+        }
+      }
+    ],
+    "METRICS_COLLECTOR": [
+      {
+        "name": "ams_metrics_collector_autostart",
+        "label": "Metrics Collector - Auto-Restart Status",
+        "description": "This alert is triggered if the Metrics Collector has been restarted automatically too frequently in last one hour. By default, a Warning alert is triggered if restarted twice in one hour and a Critical alert is triggered if restarted 4 or more times in one hour.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "RECOVERY",
+          "reporting": {
+            "ok": {
+              "text": "Metrics Collector has not been auto-started and is running normally{0}."
+            },
+            "warning": {
+              "text": "Metrics Collector has been auto-started {1} times{0}.",
+              "count": 2
+            },
+            "critical": {
+              "text": "Metrics Collector has been auto-started {1} times{0}.",
+              "count": 4
+            }
+          }
+        }
+      },
+      {
+        "name": "ams_metrics_collector_process",
+        "label": "Metrics Collector Process",
+        "description": "This alert is triggered if the Metrics Collector cannot be confirmed to be up and listening on the configured port for number of seconds equal to threshold.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{ams-site/timeline.metrics.service.webapp.address}}",
+          "default_port": 6188,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "ams_metrics_collector_hbase_master_process",
+        "label": "Metrics Collector - HBase Master Process",
+        "description": "This alert is triggered if the Metrics Collector's HBase master processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{ams-hbase-site/hbase.master.info.port}}",
+          "default_port": 61310,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "ams_metrics_collector_hbase_master_cpu",
+        "label": "Metrics Collector - HBase Master CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the Metrics Collector's HBase Master exceeds certain warning and critical thresholds. It checks the HBase Master JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{ams-hbase-site/hbase.master.info.port}}",
+            "default_port": 61310,
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
+      {
+        "name": "ams_metrics_collector_zookeeper_server_process",
+        "label": "Metrics Collector - ZooKeeper Server Process",
+        "description": "This host-level alert is triggered if the Metrics Collector's ZooKeeper server process cannot be determined to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{ams-hbase-site/hbase.zookeeper.property.clientPort}}",
+          "default_port": 61181,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "METRICS_MONITOR": [
+      {
+        "name": "ams_metrics_monitor_process",
+        "label": "Metrics Monitor Status",
+        "description": "This alert indicates the status of the Metrics Monitor process as determined by the monitor status script.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "AMBARI_METRICS/0.1.0.4.1/package/alerts/alert_ambari_metrics_monitor.py"
+        }
+      }
+    ]
+  }
+}
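
The PORT-type alerts above report OK or WARNING around the 1.5s response threshold and CRITICAL at 5.0s or on connection failure; the Ambari agent performs the real evaluation. A simplified sketch of that style of check, with an example host and port, is:

    # Simplified PORT-alert style check; host, port, and timings are examples.
    import socket
    import time

    def port_alert_state(host, port, warning=1.5, critical=5.0):
        start = time.time()
        try:
            with socket.create_connection((host, port), timeout=critical):
                elapsed = time.time() - start
        except OSError:
            return "CRITICAL", "Connection failed: %s:%s" % (host, port)
        if elapsed >= critical:
            return "CRITICAL", "TCP response took %.3fs on port %s" % (elapsed, port)
        if elapsed >= warning:
            return "WARNING", "TCP OK - %.3fs response on port %s" % (elapsed, port)
        return "OK", "TCP OK - %.3fs response on port %s" % (elapsed, port)

    print(port_alert_state("localhost", 6188))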

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml
new file mode 100755
index 0000000..5910253
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-env.xml
@@ -0,0 +1,107 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<configuration>
+  <property>
+    <name>ambari_metrics_user</name>
+    <display-name>Ambari Metrics User</display-name>
+    <value>ams</value>
+    <property-type>USER</property-type>
+    <description>Ambari Metrics User Name.</description>
+  </property>
+  <property>
+    <name>metrics_collector_log_dir</name>
+    <value>/var/log/ambari-metrics-collector</value>
+    <display-name>Metrics Collector log dir</display-name>
+    <description>Collector log directory.</description>
+  </property>
+  <property>
+    <name>metrics_collector_pid_dir</name>
+    <value>/var/run/ambari-metrics-collector</value>
+    <display-name>Metrics Collector pid dir</display-name>
+    <description>Collector pid directory.</description>
+  </property>
+  <property>
+    <name>metrics_monitor_pid_dir</name>
+    <value>/var/run/ambari-metrics-monitor</value>
+    <display-name>Metrics Monitor pid dir</display-name>
+    <description>Monitor pid directory.</description>
+  </property>
+  <property>
+    <name>metrics_monitor_log_dir</name>
+    <value>/var/log/ambari-metrics-monitor</value>
+    <display-name>Metrics Monitor log dir</display-name>
+    <description>Monitor log directory.</description>
+  </property>
+  <property>
+    <name>metrics_collector_heapsize</name>
+    <value>512</value>
+    <description>Metrics Collector Heap Size</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+  </property>
+  <property>
+    <name>content</name>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# Collector Log directory for log4j
+export AMS_COLLECTOR_LOG_DIR={{ams_collector_log_dir}}
+
+# Monitor Log directory for outfile
+export AMS_MONITOR_LOG_DIR={{ams_monitor_log_dir}}
+
+# Collector pid directory
+export AMS_COLLECTOR_PID_DIR={{ams_collector_pid_dir}}
+
+# Monitor pid directory
+export AMS_MONITOR_PID_DIR={{ams_monitor_pid_dir}}
+
+# AMS HBase pid directory
+export AMS_HBASE_PID_DIR={{hbase_pid_dir}}
+
+# AMS Collector heapsize
+export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}
+
+# HBase normalizer enabled
+export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}
+
+# HBase compaction policy enabled
+export AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}
+
+# AMS Collector options
+export AMS_COLLECTOR_OPTS="-Djava.library.path=/usr/lib/ams-hbase/lib/hadoop-native"
+{% if security_enabled %}
+export AMS_COLLECTOR_OPTS="$AMS_COLLECTOR_OPTS -Djava.security.auth.login.config={{ams_collector_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
+{% endif %}
+
+# AMS Collector GC options
+export AMS_COLLECTOR_GC_OPTS="-XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ams_collector_log_dir}}/collector-gc.log-`date +'%Y%m%d%H%M'`"
+export AMS_COLLECTOR_OPTS="$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS"
+
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-env.xml
new file mode 100755
index 0000000..379297f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-env.xml
@@ -0,0 +1,234 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hbase_log_dir</name>
+    <value>/var/log/ambari-metrics-collector</value>
+    <description>Log Directories for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <value>/var/run/ambari-metrics-collector/</value>
+    <description>Pid Directory for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_classpath_additional</name>
+    <value></value>
+    <description>Additional directory or jar in classpath for HBase.</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase_regionserver_heapsize</name>
+    <value>1024</value>
+    <description>
+        HBase RegionServer Heap Size. In embedded mode, total heap size is
+        sum of master and regionserver heap sizes.
+    </description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.cluster.distributed</name>
+      </property>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.rootdir</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>regionserver_xmn_size</name>
+    <value>256</value>
+    <description>HBase RegionServer maximum value for young generation heap size.</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.cluster.distributed</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hbase_master_xmn_size</name>
+    <value>256</value>
+    <description>
+      HBase Master maximum value for young generation heap size.
+    </description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.cluster.distributed</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>hbase_master_maxperm_size</name>
+    <value>128</value>
+    <description>HBase RegionServer maximum value for perm heap size.</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_ratio</name>
+    <value>0.2</value>
+    <description>Percentage of max heap size (-Xmx) which used for young generation heap (-Xmn).</description>
+  </property>
+  <property>
+    <name>hbase_master_heapsize</name>
+    <value>1024</value>
+    <description>
+        HBase Master Heap Size. In embedded mode, total heap size is
+        sum of master and regionserver heap sizes.
+    </description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.cluster.distributed</name>
+      </property>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.rootdir</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>max_open_files_limit</name>
+    <value>32768</value>
+    <description>
+        The maximum number of open file descriptors by process
+    </description>
+  </property>
+
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hbase-env.sh file</description>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6+ required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+additional_cp={{hbase_classpath_additional}}
+if [  -n "$additional_cp" ];
+then
+  export HBASE_CLASSPATH=${HBASE_CLASSPATH}:$additional_cp
+else
+  export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+fi
+
+# The maximum amount of heap to use for hbase shell.
+export HBASE_SHELL_OPTS="-Xmx256m"
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{hbase_log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{hbase_tmp_dir}}"
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{hbase_log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+
+{% if java_version &lt; 8 %}
+export HBASE_MASTER_OPTS=" -XX:PermSize=64m -XX:MaxPermSize={{hbase_master_maxperm_size}} -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+export HBASE_REGIONSERVER_OPTS="-XX:MaxPermSize=128m -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+{% else %}
+export HBASE_MASTER_OPTS=" -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
+export HBASE_REGIONSERVER_OPTS=" -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+{% endif %}
+
+
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{hbase_log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{hbase_pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage it's own instance of Zookeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
+export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Djava.security.auth.login.config={{ams_zookeeper_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
+{% endif %}
+
+# use embedded native libs
+_HADOOP_NATIVE_LIB="/usr/lib/ams-hbase/lib/hadoop-native/"
+export HBASE_OPTS="$HBASE_OPTS -Djava.library.path=${_HADOOP_NATIVE_LIB}"
+
+# Unset HADOOP_HOME to avoid importing HADOOP installed cluster related configs
+export HADOOP_HOME={{ams_hbase_home_dir}}
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-log4j.xml
new file mode 100755
index 0000000..52ead29
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-log4j.xml
@@ -0,0 +1,146 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.security.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollver at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Rolling File Appender properties
+hbase.log.maxfilesize=256MB
+hbase.log.maxbackupindex=20
+
+# Rolling File Appender
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+#
+# Security audit appender
+#
+hbase.security.log.file=SecurityAuth.audit
+hbase.security.log.maxfilesize=256MB
+hbase.security.log.maxbackupindex=20
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
+log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.category.SecurityLogger=${hbase.security.logger}
+log4j.additivity.SecurityLogger=false
+#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
+
+#
+# Null Appender
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=INFO
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.hbase=INFO
+# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
+#log4j.logger.org.apache.hadoop.dfs=DEBUG
+# Set this class to log INFO only; otherwise it's OTT (over the top)
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+
+
+# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
+#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
+
+# Uncomment the lines below if you want to remove logging of client region caching
+# and scan of .META. messages
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
+# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
+
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>
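
For context: Ambari typically materializes the "content" property above into a real
log4j.properties file on the collector host through its resource_management library.
A minimal sketch, assuming the ams-hbase-log4j config key and the conf dir path shown
here (both are illustrative and not taken from this patch):

import os
from resource_management.core.resources.system import File
from resource_management.core.source import InlineTemplate

def write_ams_hbase_log4j(config, conf_dir="/etc/ams-hbase/conf"):
    # Pull the log4j template text out of the desired configs and write it out.
    log4j_props = config['configurations']['ams-hbase-log4j']['content']
    File(os.path.join(conf_dir, "log4j.properties"),
         mode=0o644,
         content=InlineTemplate(log4j_props))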

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-policy.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-policy.xml
new file mode 100755
index 0000000..febbd44
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-policy.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HRegionInterface protocol implementations (i.e.
+      clients talking to HRegionServers).
+      The ACL is a comma-separated list of user and group names. The user and
+      group lists are separated by a blank, e.g. "alice,bob users,wheel".
+      A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterInterface protocol implementations (i.e.
+      clients talking to HMaster for admin operations).
+      The ACL is a comma-separated list of user and group names. The user and
+      group lists are separated by a blank, e.g. "alice,bob users,wheel".
+      A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.masterregion.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterRegionInterface protocol implementations
+      (for HRegionServers communicating with HMaster).
+      The ACL is a comma-separated list of user and group names. The user and
+      group lists are separated by a blank, e.g. "alice,bob users,wheel".
+      A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>
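
The three ACLs above all use the same syntax: a comma-separated user list and a
comma-separated group list separated by a single blank, with "*" meaning everyone.
A toy illustration of that syntax (the real checks are done by Hadoop/HBase
service-level authorization, not by code like this):

def acl_allows(acl, user, user_groups):
    # "*" opens the protocol to everyone.
    if acl.strip() == "*":
        return True
    parts = acl.split(None, 1)          # "user1,user2 group1,group2"
    users = parts[0].split(",") if parts else []
    groups = parts[1].split(",") if len(parts) > 1 else []
    return user in users or any(g in groups for g in user_groups)

# "alice,bob users,wheel" allows alice, bob, and anyone in the users or wheel group.
print(acl_allows("alice,bob users,wheel", "carol", ["wheel"]))   # True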

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-security-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-security-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-security-site.xml
new file mode 100755
index 0000000..5e7bc518
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-security-site.xml
@@ -0,0 +1,149 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>ams.zookeeper.keytab</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>ams.zookeeper.principal</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.master.kerberos.principal</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.master.keytab.file</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.myclient.keytab</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.myclient.principal</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.regionserver.kerberos.principal</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.regionserver.keytab.file</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.security.authentication</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.security.authorization</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.authProvider.1</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.jaasLoginRenew</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.kerberos.removeHostFromPrincipal</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.kerberos.removeRealmFromPrincipal</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
+</configuration>
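
All of the properties above ship empty and are only filled in when the cluster is
Kerberized. Purely illustrative placeholders for the kind of values they typically
receive (principals, keytab paths, realm, and znode below are hypothetical, not this
stack's actual Kerberos descriptor):

example_kerberized_values = {
    "hbase.security.authentication": "kerberos",
    "hbase.security.authorization": "true",
    "hadoop.security.authentication": "kerberos",
    "hbase.master.kerberos.principal": "amshbase/_HOST@EXAMPLE.COM",
    "hbase.master.keytab.file": "/etc/security/keytabs/ams-hbase.master.keytab",
    "hbase.regionserver.kerberos.principal": "amshbase/_HOST@EXAMPLE.COM",
    "hbase.regionserver.keytab.file": "/etc/security/keytabs/ams-hbase.regionserver.keytab",
    "hbase.zookeeper.property.authProvider.1":
        "org.apache.zookeeper.server.auth.SASLAuthenticationProvider",
    "hbase.zookeeper.property.jaasLoginRenew": "3600000",
    "zookeeper.znode.parent": "/ams-hbase-secure",
}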

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
new file mode 100755
index 0000000..00d0a2c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-hbase-site.xml
@@ -0,0 +1,384 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>file:///var/lib/ambari-metrics-collector/hbase</value>
+    <description>
+      The Ambari Metrics service uses HBase as its default storage backend. Set the
+      HBase rootdir either to a local filesystem path, when running Ambari Metrics in
+      embedded mode, or to an HDFS directory, example: hdfs://namenode.example.org:8020/amshbase.
+    </description>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/var/lib/ambari-metrics-collector/hbase-tmp</value>
+    <description>
+      Temporary directory on the local filesystem.
+      Change this setting to point to a location more permanent
+      than '/tmp' (The '/tmp' directory is often cleared on
+      machine restart).
+    </description>
+  </property>
+  <property>
+    <name>hbase.local.dir</name>
+    <value>${hbase.tmp.dir}/local</value>
+    <description>Directory on the local filesystem to be used as local storage.
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>false</value>
+    <description>
+      The mode the cluster will be in. Possible values are false for
+      standalone mode and true for distributed mode. If false, startup will run
+      all HBase and ZooKeeper daemons together in one JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.wait.on.regionservers.mintostart</name>
+    <value>1</value>
+    <description>
+      The number of region servers the HBase Master waits for before starting.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>{{zookeeper_quorum_hosts}}</value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+      By default this is set to localhost for local and pseudo-distributed modes
+      of operation. For a fully-distributed setup, this should be set to a full
+      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+      this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+    <final>true</final>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value>0.0.0.0</value>
+    <description>The bind address for the HBase Master web UI</description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>61310</value>
+    <description>The port for the HBase Master web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>61330</value>
+    <description>The port for the HBase RegionServer web UI.</description>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>61300</value>
+    <description>The port the HBase Master binds to.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.port</name>
+    <value>61320</value>
+    <description>The port the HBase RegionServer binds to.</description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>0</value>
+    <description>
+      The time (in milliseconds) between 'major' compactions of all
+      HStoreFiles in a region.
+      Set to 0 to disable automated major compactions.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.query.spoolThresholdBytes</name>
+    <value>20971520</value>
+    <description>
+      Threshold size in bytes after which the results of queries executed in
+      parallel are spooled to disk. Default is 20 MB.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.dataDir</name>
+    <value>${hbase.tmp.dir}/zookeeper</value>
+    <description>
+      Property from ZooKeeper's config zoo.cfg.
+      The directory where the snapshot is stored.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>10000</value>
+    <description>
+      Number of rows that will be fetched when calling next on a scanner
+      if it is not served from (local, client) memory.
+    </description>
+  </property>
+  <property>
+    <name>hbase.normalizer.enabled</name>
+    <value>true</value>
+    <description>If set to true, the Master will try to keep the region size
+    within each table approximately the same.</description>
+  </property>
+  <property>
+    <name>hbase.normalizer.period</name>
+    <value>600000</value>
+    <description>Period in ms at which the region normalizer runs in the Master.</description>
+  </property>
+  <property>
+    <name>hbase.master.normalizer.class</name>
+    <value>org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer</value>
+    <description>
+      Class used to execute the region normalization when the period occurs.
+      See the class comment for more on how it works
+      http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
+    </description>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.3</value>
+    <description>
+      Percentage of maximum heap (-Xmx setting) to allocate to the block cache
+      used by StoreFiles. The HBase default of 0.4 means allocate 40%; this stack
+      sets 0.3 (30%).
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.5</value>
+    <description>
+      Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.4</value>
+    <description>
+      When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+      Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.groupby.maxCacheSize</name>
+    <value>307200000</value>
+    <description>
+      Size in bytes of pages cached during GROUP BY spilling. Default is 100 MB.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>4294967296</value>
+    <description>
+      Maximum HFile size. If the sum of the sizes of a region's HFiles has grown
+      to exceed this value, the region is split in two. Default is 10 GB.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>4</value>
+    <description>
+      Block updates if the memstore has reached hbase.hregion.memstore.block.multiplier
+      times hbase.hregion.memstore.flush.size bytes. Useful for preventing runaway
+      memstores during spikes in update traffic.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.flusher.count</name>
+    <value>2</value>
+    <description>
+      The number of flush threads. With fewer threads, the MemStore flushes
+      will be queued. With more threads, the flushes will be executed in parallel,
+      increasing the load on HDFS, and potentially causing more compactions.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.query.timeoutMs</name>
+    <value>1200000</value>
+    <description>
+      Number of milliseconds after which a query will time out on the client.
+      Default is 10 min.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.timeout.period</name>
+    <value>900000</value>
+    <description>
+      Client scanner lease period in milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.thread.compaction.large</name>
+    <value>2</value>
+    <description>
+      The number of threads used for large compactions.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.thread.compaction.small</name>
+    <value>3</value>
+    <description>
+      The number of threads used for small compactions.
+    </description>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>61181</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.peerport</name>
+    <value>61288</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.leaderport</name>
+    <value>61388</value>
+  </property>
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <value>200</value>
+    <description>
+      If more than this number of StoreFiles exist in any one Store
+      (one StoreFile is written per flush of MemStore), updates are blocked for
+      this region until a compaction is completed, or until
+      hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value>134217728</value>
+    <description>
+      Memstore will be flushed to disk if size of the memstore exceeds this
+      number of bytes. Value is checked by a thread that runs every
+      hbase.server.thread.wakefrequency.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.query.spoolThresholdBytes</name>
+    <value>12582912</value>
+  </property>
+  <property>
+    <name>hbase.snapshot.enabled</name>
+    <value>false</value>
+    <description>Enable/Disable HBase snapshots.</description>
+  </property>
+  <property>
+    <name>hbase.replication</name>
+    <value>false</value>
+    <description>Enable/Disable HBase replication.</description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>120000</value>
+    <description>ZooKeeper session timeout in milliseconds.</description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout.localHBaseCluster</name>
+    <value>120000</value>
+    <description>
+      ZooKeeper session timeout in milliseconds for
+      pseudo distributed mode.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.sequence.saltBuckets</name>
+    <value>2</value>
+    <description>
+      Controls the number of pre-allocated regions for the SYSTEM.SEQUENCE table.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.query.maxGlobalMemoryPercentage</name>
+    <value>15</value>
+    <description>
+      Percentage of total heap memory (i.e. Runtime.getRuntime().maxMemory())
+      that all threads may use.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.spool.directory</name>
+    <value>${hbase.tmp.dir}/phoenix-spool</value>
+    <description>
+      Sets the directory for Phoenix spill files. If possible, set this to a
+      different mount point from the one used for hbase.rootdir in embedded mode.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.mutate.batchSize</name>
+    <value>10000</value>
+    <description>
+      The number of rows that are batched together and automatically committed
+      during the execution of an UPSERT SELECT or DELETE statement.
+      This affects performance of group by aggregators if they are being used.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.query.rowKeyOrderSaltedTable</name>
+    <value>true</value>
+    <description>
+      When set, user-specified split points are disallowed on salted tables, to ensure
+      that each bucket contains only entries with the same salt byte. When this property
+      is turned on, a salted table behaves just like a normal table and returns items
+      in row-key order for scans.
+    </description>
+  <property>
+    <name>phoenix.coprocessor.maxServerCacheTimeToLiveMs</name>
+    <value>60000</value>
+    <description>
+      Maximum lifetime (in milliseconds) of server caches. A cache entry
+      expires after this amount of time has passed since its last access. Consider
+      adjusting this parameter when a server-side IOException
+      ("Could not find hash cache for joinId") happens. Warnings like
+      "Earlier hash cache(s) might have expired on servers" might also be a
+      sign that this number should be increased.
+    </description>
+  </property>
+  <property>
+    <name>phoenix.coprocessor.maxMetaDataCacheSize</name>
+    <value>20480000</value>
+    <description>
+      Maximum size in bytes of the total server-side metadata cache, after which
+      evictions begin to occur based on least-recent access time.
+      Default is 20 MB.
+    </description>
+  </property>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>Enable/Disable short-circuit reads for your client.
+      Hadoop servers must be configured to allow short-circuit reads
+      for the hbase user for this to take effect.
+    </description>
+    <depends-on>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.cluster.distributed</name>
+      </property>
+      <property>
+        <type>ams-hbase-site</type>
+        <name>hbase.rootdir</name>
+      </property>
+    </depends-on>
+  </property>
+
+</configuration>
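
One sanity check worth keeping in mind for the file above: hfile.block.cache.size (0.3)
and hbase.regionserver.global.memstore.upperLimit (0.5) are both fractions of the same
heap, and HBase generally refuses to start if their sum exceeds 0.8 (it wants roughly
20% of the heap left for everything else). A quick arithmetic check:

hfile_block_cache_size = 0.3   # hfile.block.cache.size
memstore_upper_limit = 0.5     # hbase.regionserver.global.memstore.upperLimit

total = hfile_block_cache_size + memstore_upper_limit
assert total <= 0.8, "block cache + memstore would starve the rest of the heap"
print("heap reserved for block cache + memstores: %d%%" % (total * 100))   # 80%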

http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-log4j.xml
new file mode 100755
index 0000000..2b0a4cf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/configuration/ams-log4j.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Define some default values that can be overridden by system properties
+ams.log.dir=.
+ams.log.file=ambari-metrics-collector.log
+
+# Root logger option
+log4j.rootLogger=INFO,file
+
+# Direct log messages to a log file
+log4j.appender.file=org.apache.log4j.RollingFileAppender
+log4j.appender.file.File=${ams.log.dir}/${ams.log.file}
+log4j.appender.file.MaxFileSize=80MB
+log4j.appender.file.MaxBackupIndex=60
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>
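
With the RollingFileAppender settings above (80MB per file, 60 backups plus the active
file), the worst-case disk footprint of ambari-metrics-collector.log works out to
roughly 4.8 GB. A quick check:

max_file_mb = 80      # log4j.appender.file.MaxFileSize
max_backups = 60      # log4j.appender.file.MaxBackupIndex

worst_case_mb = max_file_mb * (max_backups + 1)
print("worst-case collector log footprint: ~%.1f GB" % (worst_case_mb / 1024.0))   # ~4.8 GB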