You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by sc...@apache.org on 2015/04/10 20:48:20 UTC
[2/2] ambari git commit: AMBARI-10318 Add and enable 2.3.GlusterFS
stack for HDP
AMBARI-10318 Add and enable 2.3.GlusterFS stack for HDP
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/40b8b2b4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/40b8b2b4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/40b8b2b4
Branch: refs/heads/trunk
Commit: 40b8b2b445746143dbea37c8cea5c943c6f3149c
Parents: 83df8c2
Author: Scott Creeley <sc...@redhat.com>
Authored: Fri Apr 10 14:47:03 2015 -0400
Committer: Scott Creeley <sc...@redhat.com>
Committed: Fri Apr 10 14:47:38 2015 -0400
----------------------------------------------------------------------
.../2.3.GlusterFS/configuration/cluster-env.xml | 107 ++++++
.../configuration/cluster-env.xml.noversion | 56 +++
.../configuration/cluster-env.xml.version | 107 ++++++
.../stacks/HDP/2.3.GlusterFS/metainfo.xml | 23 ++
.../stacks/HDP/2.3.GlusterFS/repos/repoinfo.xml | 80 ++++
.../HDP/2.3.GlusterFS/role_command_order.json | 8 +
.../ACCUMULO/configuration/accumulo-log4j.xml | 112 ++++++
.../services/ACCUMULO/kerberos.json | 145 ++++++++
.../services/ACCUMULO/metainfo.xml | 49 +++
.../2.3.GlusterFS/services/FALCON/metainfo.xml | 44 +++
.../2.3.GlusterFS/services/FLUME/metainfo.xml | 50 +++
.../GLUSTERFS/configuration/core-site.xml | 43 +++
.../GLUSTERFS/configuration/hadoop-env.xml | 194 ++++++++++
.../services/GLUSTERFS/metainfo.xml | 71 ++++
.../GLUSTERFS/package/scripts/glusterfs.py | 29 ++
.../package/scripts/glusterfs_client.py | 34 ++
.../GLUSTERFS/package/scripts/params.py | 29 ++
.../GLUSTERFS/package/scripts/service_check.py | 37 ++
.../package/templates/glusterfs-env.sh.j2 | 18 +
.../package/templates/glusterfs.properties.j2 | 36 ++
.../services/HBASE/configuration/hbase-site.xml | 370 ++++++++++++++++++
.../2.3.GlusterFS/services/HBASE/metainfo.xml | 56 +++
.../2.3.GlusterFS/services/HDFS/metainfo.xml | 92 +++++
.../HIVE/configuration/webhcat-site.xml | 135 +++++++
.../2.3.GlusterFS/services/HIVE/metainfo.xml | 85 +++++
.../2.3.GlusterFS/services/KAFKA/metainfo.xml | 44 +++
.../services/KERBEROS/metainfo.xml | 25 ++
.../2.3.GlusterFS/services/KNOX/metainfo.xml | 44 +++
.../2.3.GlusterFS/services/MAHOUT/metainfo.xml | 26 ++
.../2.3.GlusterFS/services/OOZIE/metainfo.xml | 70 ++++
.../HDP/2.3.GlusterFS/services/PIG/metainfo.xml | 52 +++
.../2.3.GlusterFS/services/RANGER/metainfo.xml | 54 +++
.../2.3.GlusterFS/services/SLIDER/metainfo.xml | 56 +++
.../2.3.GlusterFS/services/SPARK/metainfo.xml | 53 +++
.../2.3.GlusterFS/services/SQOOP/metainfo.xml | 57 +++
.../2.3.GlusterFS/services/STORM/metainfo.xml | 45 +++
.../HDP/2.3.GlusterFS/services/TEZ/metainfo.xml | 46 +++
.../YARN/configuration-mapred/core-site.xml.2 | 20 +
.../YARN/configuration-mapred/mapred-site.xml | 88 +++++
.../YARN/configuration-mapred/mapred-site.xml.2 | 68 ++++
.../YARN/configuration/capacity-scheduler.xml | 45 +++
.../services/YARN/configuration/yarn-site.xml | 372 +++++++++++++++++++
.../2.3.GlusterFS/services/YARN/metainfo.xml | 104 ++++++
.../services/ZOOKEEPER/metainfo.xml | 45 +++
.../HDP/2.3.GlusterFS/services/stack_advisor.py | 21 ++
45 files changed, 3345 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml
new file mode 100644
index 0000000..485125c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml
@@ -0,0 +1,107 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+ <!-- The properties that end in tar_source describe the pattern of where the tar.gz files come from.
+ They will replace {{ hdp_stack_version }} with the "#.#.#.#" value followed by -* (which is the build number in HDP 2.2).
+ When copying those tarballs, Ambari will look up the corresponding tar_destination_folder property to know where it
+ should be copied to.
+ All of the destination folders must begin with glusterfs://
+ Please note that the spaces inside of {{ ... }} are important.
+
+ IMPORTANT: Any properties included here must also be declared in site_properties.js
+
+ -->
+ <!-- Tez tarball is needed by Hive Server when using the Tez execution engine. -->
+ <property>
+ <name>tez_tar_source</name>
+ <value>/usr/hdp/current/tez-client/lib/tez.tar.gz</value>
+ <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+ </property>
+ <property>
+ <name>tez_tar_destination_folder</name>
+ <value>glusterfs:///apps/{{ hdp_stack_version }}/tez/</value>
+ <description>Destination HDFS folder for the file.</description>
+ </property>
+
+ <!-- Hive tarball is needed by WebHCat. -->
+ <property>
+ <name>hive_tar_source</name>
+ <value>/usr/hdp/current/hive-client/hive.tar.gz</value>
+ <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+ </property>
+ <property>
+ <name>hive_tar_destination_folder</name>
+ <value>glusterfs:///apps/{{ hdp_stack_version }}/hive/</value>
+ <description>Destination HDFS folder for the file.</description>
+ </property>
+
+ <!-- Pig tarball is needed by WebHCat. -->
+ <property>
+ <name>pig_tar_source</name>
+ <value>/usr/hdp/current/pig-client/pig.tar.gz</value>
+ <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+ </property>
+ <property>
+ <name>pig_tar_destination_folder</name>
+ <value>glusterfs:///apps/{{ hdp_stack_version }}/pig/</value>
+ <description>Destination HDFS folder for the file.</description>
+ </property>
+
+ <!-- Hadoop Streaming jar is needed by WebHCat. -->
+ <property>
+ <name>hadoop-streaming_tar_source</name>
+ <value>/usr/hdp/current/hadoop-mapreduce-client/hadoop-streaming.jar</value>
+ <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+ </property>
+ <property>
+ <name>hadoop-streaming_tar_destination_folder</name>
+ <value>glusterfs:///apps/{{ hdp_stack_version }}/mapreduce/</value>
+ <description>Destination HDFS folder for the file.</description>
+ </property>
+
+ <!-- Sqoop tarball is needed by WebHCat. -->
+ <property>
+ <name>sqoop_tar_source</name>
+ <value>/usr/hdp/current/sqoop-client/sqoop.tar.gz</value>
+ <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+ </property>
+ <property>
+ <name>sqoop_tar_destination_folder</name>
+ <value>glusterfs:///apps/{{ hdp_stack_version }}/sqoop/</value>
+ <description>Destination HDFS folder for the file.</description>
+ </property>
+
+ <!-- MapReduce2 tarball -->
+ <property>
+ <name>mapreduce_tar_source</name>
+ <value>/usr/hdp/current/hadoop-client/mapreduce.tar.gz</value>
+ <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+ </property>
+ <property>
+ <name>mapreduce_tar_destination_folder</name>
+ <value>glusterfs:///apps/{{ hdp_stack_version }}/mapreduce/</value>
+ <description>Destination HDFS folder for the file.</description>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.noversion
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.noversion b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.noversion
new file mode 100644
index 0000000..d41ff98
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.noversion
@@ -0,0 +1,56 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>security_enabled</name>
+ <value>false</value>
+ <description>Hadoop Security</description>
+ </property>
+ <property>
+ <name>kerberos_domain</name>
+ <value>EXAMPLE.COM</value>
+ <description>Kerberos realm.</description>
+ </property>
+ <property>
+ <name>ignore_groupsusers_create</name>
+ <value>false</value>
+ <description>Whether to ignore failures on users and group creation</description>
+ </property>
+ <property>
+ <name>smokeuser</name>
+ <value>ambari-qa</value>
+ <property-type>USER</property-type>
+ <description>User executing service checks</description>
+ </property>
+ <property>
+ <name>smokeuser_keytab</name>
+ <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
+ <description>Path to smoke test user keytab file</description>
+ </property>
+ <property>
+ <name>user_group</name>
+ <value>hadoop</value>
+ <property-type>GROUP</property-type>
+ <description>Hadoop user group.</description>
+ </property>
+</configuration>
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.version
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.version b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.version
new file mode 100644
index 0000000..485125c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.version
@@ -0,0 +1,107 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+ <!-- The properties that end in tar_source describe the pattern of where the tar.gz files come from.
+ They will replace {{ hdp_stack_version }} with the "#.#.#.#" value followed by -* (which is the build number in HDP 2.2).
+ When copying those tarballs, Ambari will look up the corresponding tar_destination_folder property to know where it
+ should be copied to.
+ All of the destination folders must begin with glusterfs://
+ Please note that the spaces inside of {{ ... }} are important.
+
+ IMPORTANT: Any properties included here must also be declared in site_properties.js
+
+ -->
+ <!-- Tez tarball is needed by Hive Server when using the Tez execution engine. -->
+ <property>
+ <name>tez_tar_source</name>
+ <value>/usr/hdp/current/tez-client/lib/tez.tar.gz</value>
+ <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+ </property>
+ <property>
+ <name>tez_tar_destination_folder</name>
+ <value>glusterfs:///apps/{{ hdp_stack_version }}/tez/</value>
+ <description>Destination HDFS folder for the file.</description>
+ </property>
+
+ <!-- Hive tarball is needed by WebHCat. -->
+ <property>
+ <name>hive_tar_source</name>
+ <value>/usr/hdp/current/hive-client/hive.tar.gz</value>
+ <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+ </property>
+ <property>
+ <name>hive_tar_destination_folder</name>
+ <value>glusterfs:///apps/{{ hdp_stack_version }}/hive/</value>
+ <description>Destination HDFS folder for the file.</description>
+ </property>
+
+ <!-- Pig tarball is needed by WebHCat. -->
+ <property>
+ <name>pig_tar_source</name>
+ <value>/usr/hdp/current/pig-client/pig.tar.gz</value>
+ <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+ </property>
+ <property>
+ <name>pig_tar_destination_folder</name>
+ <value>glusterfs:///apps/{{ hdp_stack_version }}/pig/</value>
+ <description>Destination HDFS folder for the file.</description>
+ </property>
+
+ <!-- Hadoop Streaming jar is needed by WebHCat. -->
+ <property>
+ <name>hadoop-streaming_tar_source</name>
+ <value>/usr/hdp/current/hadoop-mapreduce-client/hadoop-streaming.jar</value>
+ <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+ </property>
+ <property>
+ <name>hadoop-streaming_tar_destination_folder</name>
+ <value>glusterfs:///apps/{{ hdp_stack_version }}/mapreduce/</value>
+ <description>Destination HDFS folder for the file.</description>
+ </property>
+
+ <!-- Sqoop tarball is needed by WebHCat. -->
+ <property>
+ <name>sqoop_tar_source</name>
+ <value>/usr/hdp/current/sqoop-client/sqoop.tar.gz</value>
+ <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+ </property>
+ <property>
+ <name>sqoop_tar_destination_folder</name>
+ <value>glusterfs:///apps/{{ hdp_stack_version }}/sqoop/</value>
+ <description>Destination HDFS folder for the file.</description>
+ </property>
+
+ <!-- MapReduce2 tarball -->
+ <property>
+ <name>mapreduce_tar_source</name>
+ <value>/usr/hdp/current/hadoop-client/mapreduce.tar.gz</value>
+ <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+ </property>
+ <property>
+ <name>mapreduce_tar_destination_folder</name>
+ <value>glusterfs:///apps/{{ hdp_stack_version }}/mapreduce/</value>
+ <description>Destination HDFS folder for the file.</description>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/metainfo.xml
new file mode 100644
index 0000000..24eff8e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <versions>
+ <active>false</active>
+ </versions>
+ <extends>2.2</extends>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/repos/repoinfo.xml
new file mode 100644
index 0000000..646b2c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/repos/repoinfo.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<reposinfo>
+ <latest>http://public-repo-1.hortonworks.com/HDP/hdp_urlinfo.json</latest>
+ <os family="redhat6">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.3.0.0</baseurl>
+ <repoid>HDP-2.3</repoid>
+ <reponame>HDP</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6</baseurl>
+ <repoid>HDP-UTILS-1.1.0.20</repoid>
+ <reponame>HDP-UTILS</reponame>
+ </repo>
+ </os>
+ <os family="redhat5">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/updates/2.3.0.0</baseurl>
+ <repoid>HDP-2.3</repoid>
+ <reponame>HDP</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos5</baseurl>
+ <repoid>HDP-UTILS-1.1.0.20</repoid>
+ <reponame>HDP-UTILS</reponame>
+ </repo>
+ </os>
+ <os family="suse11">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11sp3/2.x/updates/2.3.0.0</baseurl>
+ <repoid>HDP-2.3</repoid>
+ <reponame>HDP</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/suse11sp3</baseurl>
+ <repoid>HDP-UTILS-1.1.0.20</repoid>
+ <reponame>HDP-UTILS</reponame>
+ </repo>
+ </os>
+ <os family="ubuntu12">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu12/2.x/updates/2.3.0.0</baseurl>
+ <repoid>HDP-2.3</repoid>
+ <reponame>HDP</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/ubuntu12</baseurl>
+ <repoid>HDP-UTILS-1.1.0.20</repoid>
+ <reponame>HDP-UTILS</reponame>
+ </repo>
+ </os>
+ <os family="ubuntu7">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/debian6/2.x/updates/2.3.0.0</baseurl>
+ <repoid>HDP-2.3</repoid>
+ <reponame>HDP</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/debian6</baseurl>
+ <repoid>HDP-UTILS-1.1.0.20</repoid>
+ <reponame>HDP-UTILS</reponame>
+ </repo>
+ </os>
+</reposinfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/role_command_order.json
new file mode 100644
index 0000000..ead3dd4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/role_command_order.json
@@ -0,0 +1,8 @@
+{
+ "_comment" : "Record format:",
+ "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+ "general_deps" : {
+ "_comment" : "dependencies for all cases",
+ "MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"]
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
new file mode 100644
index 0000000..ef119f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
@@ -0,0 +1,112 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+ <property>
+ <name>audit_log_level</name>
+ <value>OFF</value>
+ <description>Log level for audit logging</description>
+ </property>
+
+ <property>
+ <name>monitor_forwarding_log_level</name>
+ <value>WARN</value>
+ <description>Log level for logging forwarded to the Accumulo
+ Monitor</description>
+ </property>
+
+ <property>
+ <name>debug_log_size</name>
+ <value>1000M</value>
+ <description>Size of each debug rolling log file</description>
+ </property>
+
+ <property>
+ <name>debug_num_logs</name>
+ <value>10</value>
+ <description>Number of rolling debug log files to keep</description>
+ </property>
+
+ <property>
+ <name>info_log_size</name>
+ <value>1000M</value>
+ <description>Size of each info rolling log file</description>
+ </property>
+
+ <property>
+ <name>info_num_logs</name>
+ <value>10</value>
+ <description>Number of rolling info log files to keep</description>
+ </property>
+
+ <property>
+ <name>content</name>
+ <description>Custom log4j.properties</description>
+ <value>
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# default logging properties:
+# by default, log everything at INFO or higher to the console
+log4j.rootLogger=INFO,A1
+
+# hide Jetty junk
+log4j.logger.org.mortbay.log=WARN,A1
+
+# hide "Got brand-new compressor" messages
+log4j.logger.org.apache.hadoop.io.compress=WARN,A1
+log4j.logger.org.apache.accumulo.core.file.rfile.bcfile.Compression=WARN,A1
+
+# hide junk from TestRandomDeletes
+log4j.logger.org.apache.accumulo.test.TestRandomDeletes=WARN,A1
+
+# hide junk from VFS
+log4j.logger.org.apache.commons.vfs2.impl.DefaultFileSystemManager=WARN,A1
+
+# hide almost everything from zookeeper
+log4j.logger.org.apache.zookeeper=ERROR,A1
+
+# hide AUDIT messages in the shell, alternatively you could send them to a different logger
+log4j.logger.org.apache.accumulo.shell.Shell.audit=WARN,A1
+
+# Send most things to the console
+log4j.appender.A1=org.apache.log4j.ConsoleAppender
+log4j.appender.A1.layout.ConversionPattern=%d{ISO8601} [%-8c{2}] %-5p: %m%n
+log4j.appender.A1.layout=org.apache.log4j.PatternLayout
+
+ </value>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/kerberos.json
new file mode 100644
index 0000000..3a3ecc3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/kerberos.json
@@ -0,0 +1,145 @@
+{
+ "services": [
+ {
+ "name": "ACCUMULO",
+ "identities": [
+ {
+ "name": "accumulo",
+ "principal": {
+ "value": "${accumulo-env/accumulo_user}@${realm}",
+ "type" : "user",
+ "configuration": "accumulo-env/accumulo_principal_name",
+ "local_username": "${accumulo-env/accumulo_user}"
+ },
+ "keytab": {
+ "file": "${keytab_dir}/accumulo.headless.keytab",
+ "owner": {
+ "name": "${accumulo-env/accumulo_user}",
+ "access": "r"
+ },
+ "group": {
+ "name": "${cluster-env/user_group}",
+ "access": "r"
+ },
+ "configuration": "accumulo-env/accumulo_user_keytab"
+ }
+ },
+ {
+ "name": "accumulo_service",
+ "principal": {
+ "value": "${accumulo-env/accumulo_user}/_HOST@${realm}",
+ "type" : "service",
+ "configuration": "accumulo-site/general.kerberos.principal",
+ "local_username": "${accumulo-env/accumulo_user}"
+ },
+ "keytab": {
+ "file": "${keytab_dir}/accumulo.service.keytab",
+ "owner": {
+ "name": "${accumulo-env/accumulo_user}",
+ "access": "r"
+ },
+ "group": {
+ "name": "${cluster-env/user_group}",
+ "access": ""
+ },
+ "configuration": "accumulo-site/general.kerberos.keytab"
+ }
+ },
+ {
+ "name": "accumulo_tracer",
+ "principal": {
+ "value": "tracer@${realm}",
+ "type" : "user",
+ "configuration": "accumulo-site/trace.user",
+ "local_username": "${accumulo-env/accumulo_user}"
+ },
+ "keytab": {
+ "file": "${keytab_dir}/accumulo-tracer.headless.keytab",
+ "owner": {
+ "name": "${accumulo-env/accumulo_user}",
+ "access": "r"
+ },
+ "group": {
+ "name": "${cluster-env/user_group}",
+ "access": ""
+ },
+ "configuration": "accumulo-site/trace.token.property.keytab"
+ }
+ },
+ {
+ "name": "/hdfs"
+ },
+ {
+ "name": "/smokeuser"
+ }
+ ],
+ "configurations": [
+ {
+ "accumulo-site": {
+ "instance.rpc.sasl.enabled": "true",
+ "instance.security.authenticator": "org.apache.accumulo.server.security.handler.KerberosAuthenticator",
+ "instance.security.authorizor": "org.apache.accumulo.server.security.handler.KerberosAuthorizor",
+ "instance.security.permissionHandler": "org.apache.accumulo.server.security.handler.KerberosPermissionHandler",
+ "trace.token.type": "org.apache.accumulo.core.client.security.tokens.KerberosToken",
+ "general.delegation.token.lifetime": "7d",
+ "general.delegation.token.update.interval": "1d"
+ }
+ }
+ ],
+ "components": [
+ {
+ "name": "ACCUMULO_MASTER",
+ "identities": [
+ {
+ "name": "./accumulo_service"
+ }
+ ]
+ },
+ {
+ "name": "ACCUMULO_TSERVER",
+ "identities": [
+ {
+ "name": "./accumulo_service"
+ }
+ ]
+ },
+ {
+ "name": "ACCUMULO_MONITOR",
+ "identities": [
+ {
+ "name": "./accumulo_service"
+ },
+ {
+ "name": "./accumulo_tracer"
+ }
+ ]
+ },
+ {
+ "name": "ACCUMULO_GC",
+ "identities": [
+ {
+ "name": "./accumulo_service"
+ }
+ ]
+ },
+ {
+ "name": "ACCUMULO_TRACER",
+ "identities": [
+ {
+ "name": "./accumulo_tracer"
+ }
+ ]
+ },
+ {
+ "name": "ACCUMULO_CLIENT",
+ "identities": [
+ {
+ "name": "./accumulo"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
+
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/metainfo.xml
new file mode 100644
index 0000000..1f2c281
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/metainfo.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>ACCUMULO</name>
+ <version>1.7.0.2.3</version>
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>redhat5,redhat6,suse11</osFamily>
+ <packages>
+ <package>
+ <name>accumulo_2_3_*</name>
+ </package>
+ </packages>
+ </osSpecific>
+ <osSpecific>
+ <osFamily>ubuntu7,ubuntu12</osFamily>
+ <packages>
+ <package>
+ <name>accumulo-2-3-.*</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ <requiredServices>
+ <service>ZOOKEEPER</service>
+ </requiredServices>
+
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FALCON/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FALCON/metainfo.xml
new file mode 100644
index 0000000..a4704bd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FALCON/metainfo.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>FALCON</name>
+ <version>0.7.0.2.3</version>
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>redhat5,redhat6,suse11</osFamily>
+ <packages>
+ <package>
+ <name>falcon_2_3_*</name>
+ </package>
+ </packages>
+ </osSpecific>
+ <osSpecific>
+ <osFamily>ubuntu7,ubuntu12</osFamily>
+ <packages>
+ <package>
+ <name>falcon-2-3-.*</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FLUME/metainfo.xml
new file mode 100644
index 0000000..d554ec7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FLUME/metainfo.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>FLUME</name>
+ <version>1.5.2.2.3</version>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>redhat5,redhat6,suse11</osFamily>
+ <packages>
+ <package>
+ <name>flume_2_3_*</name>
+ </package>
+ </packages>
+ </osSpecific>
+ <osSpecific>
+ <osFamily>ubuntu7,ubuntu12</osFamily>
+ <packages>
+ <package>
+ <name>flume-2-3-.*</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ <requiredServices>
+ <service>GLUSTERFS</service>
+ </requiredServices>
+
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
new file mode 100644
index 0000000..a861b5f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- file system properties -->
+
+ <property>
+ <name>fs.AbstractFileSystem.glusterfs.impl</name>
+ <value>org.apache.hadoop.fs.local.GlusterFs</value>
+ </property>
+
+ <property>
+ <name>fs.glusterfs.impl</name>
+ <value>org.apache.hadoop.fs.glusterfs.GlusterFileSystem</value>
+ </property>
+
+ <property>
+ <name>fs.defaultFS</name>
+    <value>glusterfs://localhost:8020</value>
+ </property>
+
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..bce6b53
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
@@ -0,0 +1,194 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+ <property>
+ <name>hadoop_pid_dir_prefix</name>
+ <value>/var/run/hadoop</value>
+ <description>Hadoop PID Dir Prefix</description>
+ </property>
+ <property>
+ <name>hadoop_heapsize</name>
+ <value>1024</value>
+ <description>Hadoop maximum Java heap size</description>
+ </property>
+ <property>
+ <name>glusterfs_user</name>
+ <value>root</value>
+ <description></description>
+ </property>
+ <property>
+ <name>hdfs_log_dir_prefix</name>
+ <value>/var/log/hadoop</value>
+ <description>Hadoop Log Dir Prefix</description>
+ </property>
+ <property>
+ <name>namenode_heapsize</name>
+ <value>1024</value>
+ <description>NameNode Java heap size</description>
+ </property>
+ <property>
+ <name>namenode_host</name>
+ <value></value>
+ <description>NameNode Host.</description>
+ </property>
+ <property>
+ <name>snamenode_host</name>
+ <value></value>
+ <description>Secondary NameNode.</description>
+ </property>
+ <property>
+ <name>proxyuser_group</name>
+ <value>users</value>
+ <description>Proxy user group.</description>
+ </property>
+ <property>
+ <name>hdfs_user</name>
+ <value>hdfs</value>
+ <description>User to run HDFS as</description>
+ </property>
+ <!--
+ <property>
+ <name>user_group</name>
+ <value>hadoop</value>
+ <description>Proxy user group.</description>
+ </property>
+ -->
+ <!-- hadoop-env.sh -->
+ <property>
+ <name>content</name>
+ <description>This is the jinja template for hadoop-env.sh file</description>
+ <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME. All others are
+# optional. When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use. Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options. Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options. Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored. $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from. Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+ JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+ JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+ export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+ </value>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/metainfo.xml
new file mode 100644
index 0000000..8bf4eb6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/metainfo.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>GLUSTERFS</name>
+ <displayName>GLUSTERFS</displayName>
+      <comment>A Hadoop Compatible File System</comment>
+ <version>2.1.3.0</version>
+ <components>
+ <component>
+ <name>GLUSTERFS_CLIENT</name>
+ <category>CLIENT</category>
+ <commandScript>
+ <script>scripts/glusterfs_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>env</type>
+ <fileName>hadoop-env.sh</fileName>
+ <dictionaryName>hadoop-env</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+ </components>
+<!--
+ <osSpecifics>
+ <osSpecific>
+      <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <type>rpm</type>
+ <name>glusterfs</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+-->
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <configuration-dependencies>
+ <config-type>core-site</config-type>
+ <config-type>hadoop-env</config-type>
+ <!--<config-type>hdfs-site</config-type>-->
+ </configuration-dependencies>
+
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs.py b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs.py
new file mode 100644
index 0000000..8b64c6a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+
+def glusterfs():
+ import params
+
+ Directory( params.glusterfs_conf_dir
+ )
+
+
+
+
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs_client.py
new file mode 100644
index 0000000..840c76c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs_client.py
@@ -0,0 +1,34 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+from glusterfs import glusterfs
+
+class GlusterFSClient(Script):
+
+ def configure(self, env):
+ import params
+ env.set_params(params)
+ glusterfs()
+
+ def status(self, env):
+ raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+ GlusterFSClient().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/params.py
new file mode 100644
index 0000000..6d88109
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/params.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+
+#glusterfs_home = '/usr/lib/glusterfs'
+glusterfs_conf_dir = '/etc/glusterfs'
+log_dir = '/var/log/glusterfs'
+java64_home = config['hostLevelParams']['java_home']
+hadoop_home = "/usr"
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/service_check.py
new file mode 100644
index 0000000..6619a73
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/service_check.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class GlusterFSServiceCheck(Script):
+ def service_check(self, env):
+ import params
+ env.set_params(params)
+
+ Execute(format("env GLUSTERFS_LOG_DIR=/var/log/glusterfs "
+ "GLUSTERFS_PID_DIR=/var/run/glusterfs "
+ "glusterd --version"),
+ logoutput=True,
+ tries = 3,
+ try_sleep = 20
+ )
+
+if __name__ == "__main__":
+ GlusterFSServiceCheck().execute()
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs-env.sh.j2
new file mode 100644
index 0000000..1f4c746
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs-env.sh.j2
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs.properties.j2
new file mode 100644
index 0000000..1bf6e1d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs.properties.j2
@@ -0,0 +1,36 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# GlusterFS configuration file. All values can be overwritten by command line arguments.
+
+
+
+# load jarfile, colon separated
+#jar=/usr/lib/hadoop/lib
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+#verbose=true
+
+#exectype local|mapreduce, mapreduce is default
+#exectype=mapreduce
+
+
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml
new file mode 100644
index 0000000..cf8ddd3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,370 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.rootdir</name>
+ <value>glusterfs:///hbase</value>
+ <description>The directory shared by region servers and into
+ which HBase persists. The URL should be 'fully-qualified'
+ to include the filesystem scheme. For example, to specify the
+ HDFS directory '/hbase' where the HDFS instance's namenode is
+ running at namenode.example.org on port 9000, set this value to:
+ hdfs://namenode.example.org:9000/hbase. By default HBase writes
+ into /tmp. Change this configuration else all data will be lost
+ on machine restart.
+ </description>
+ </property>
+ <property>
+ <name>hbase.cluster.distributed</name>
+ <value>true</value>
+ <description>The mode the cluster will be in. Possible values are
+ false for standalone mode and true for distributed mode. If
+ false, startup will run all HBase and ZooKeeper daemons together
+ in the one JVM.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.port</name>
+ <value>60000</value>
+ <description>The port the HBase Master should bind to.</description>
+ </property>
+ <property>
+ <name>hbase.tmp.dir</name>
+ <value>/hadoop/hbase</value>
+ <description>Temporary directory on the local filesystem.
+ Change this setting to point to a location more permanent
+ than '/tmp' (The '/tmp' directory is often cleared on
+ machine restart).
+ </description>
+ </property>
+ <property>
+ <name>hbase.local.dir</name>
+ <value>${hbase.tmp.dir}/local</value>
+ <description>Directory on the local filesystem to be used as a local storage
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.info.bindAddress</name>
+ <value>0.0.0.0</value>
+ <description>The bind address for the HBase Master web UI
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.info.port</name>
+ <value>60010</value>
+ <description>The port for the HBase Master web UI.</description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port</name>
+ <value>60030</value>
+ <description>The port for the HBase RegionServer web UI.</description>
+ </property>
+ <property>
+ <name>hbase.regionserver.global.memstore.upperLimit</name>
+ <value>0.4</value>
+ <description>Maximum size of all memstores in a region server before new
+ updates are blocked and flushes are forced. Defaults to 40% of heap
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.handler.count</name>
+ <value>60</value>
+ <description>Count of RPC Listener instances spun up on RegionServers.
+ Same property is used by the Master for count of master handlers.
+    Default is 30.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.majorcompaction</name>
+ <value>86400000</value>
+ <description>The time (in milliseconds) between 'major' compactions of all
+ HStoreFiles in a region. Default: 1 day.
+ Set to 0 to disable automated major compactions.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.regionserver.global.memstore.lowerLimit</name>
+ <value>0.38</value>
+ <description>When memstores are being forced to flush to make room in
+    memory, keep flushing until we hit this mark. Defaults to 38% of heap.
+ This value equal to hbase.regionserver.global.memstore.upperLimit causes
+ the minimum possible flushing to occur when updates are blocked due to
+ memstore limiting.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.memstore.block.multiplier</name>
+ <value>2</value>
+ <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
+ time hbase.hregion.flush.size bytes. Useful preventing
+ runaway memstore during spikes in update traffic. Without an
+ upper-bound, memstore fills such that when it flushes the
+ resultant flush files take a long time to compact or split, or
+ worse, we OOME
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.memstore.flush.size</name>
+ <value>134217728</value>
+ <description>
+ Memstore will be flushed to disk if size of the memstore
+ exceeds this number of bytes. Value is checked by a thread that runs
+ every hbase.server.thread.wakefrequency.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.memstore.mslab.enabled</name>
+ <value>true</value>
+ <description>
+ Enables the MemStore-Local Allocation Buffer,
+ a feature which works to prevent heap fragmentation under
+ heavy write loads. This can reduce the frequency of stop-the-world
+ GC pauses on large heaps.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.max.filesize</name>
+ <value>10737418240</value>
+ <description>
+ Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+ grown to exceed this value, the hosting HRegion is split in two.
+      Default: 10G.
+ </description>
+ </property>
+ <property>
+ <name>hbase.client.scanner.caching</name>
+ <value>100</value>
+ <description>Number of rows that will be fetched when calling next
+ on a scanner if it is not served from (local, client) memory. Higher
+ caching values will enable faster scanners but will eat up more memory
+ and some calls of next may take longer and longer times when the cache is empty.
+ Do not set this value such that the time between invocations is greater
+ than the scanner timeout; i.e. hbase.regionserver.lease.period
+ </description>
+ </property>
+ <property>
+ <name>zookeeper.session.timeout</name>
+ <value>30000</value>
+ <description>ZooKeeper session timeout.
+ HBase passes this to the zk quorum as suggested maximum time for a
+ session (This setting becomes zookeeper's 'maxSessionTimeout'). See
+ http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+ "The client sends a requested timeout, the server responds with the
+ timeout that it can give the client. " In milliseconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.client.keyvalue.maxsize</name>
+ <value>10485760</value>
+ <description>Specifies the combined maximum allowed size of a KeyValue
+ instance. This is to set an upper boundary for a single entry saved in a
+ storage file. Since they cannot be split it helps avoiding that a region
+ cannot be split any further because the data is too large. It seems wise
+ to set this to a fraction of the maximum region size. Setting it to zero
+ or less disables the check.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.compactionThreshold</name>
+ <value>3</value>
+ <description>
+ If more than this number of HStoreFiles in any one HStore
+ (one HStoreFile is written per flush of memstore) then a compaction
+ is run to rewrite all HStoreFiles files as one. Larger numbers
+ put off compaction but when it runs, it takes longer to complete.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hstore.flush.retries.number</name>
+ <value>120</value>
+ <description>
+ The number of times the region flush operation will be retried.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.hstore.blockingStoreFiles</name>
+ <value>10</value>
+ <description>
+ If more than this number of StoreFiles in any one Store
+ (one StoreFile is written per flush of MemStore) then updates are
+ blocked for this HRegion until a compaction is completed, or
+ until hbase.hstore.blockingWaitTime has been exceeded.
+ </description>
+ </property>
+ <property>
+ <name>hfile.block.cache.size</name>
+ <value>0.40</value>
+ <description>
+ Percentage of maximum heap (-Xmx setting) to allocate to block cache
+      used by HFile/StoreFile. Default of 0.4 means allocate 40%.
+ Set to 0 to disable but it's not recommended.
+ </description>
+ </property>
+
+ <!-- The following properties configure authentication information for
+ HBase processes when using Kerberos security. There are no default
+ values, included here for documentation purposes -->
+ <property>
+ <name>hbase.master.keytab.file</name>
+ <value>/etc/security/keytabs/hbase.service.keytab</value>
+ <description>Full path to the kerberos keytab file to use for logging in
+ the configured HMaster server principal.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.kerberos.principal</name>
+ <value>hbase/_HOST@EXAMPLE.COM</value>
+ <description>Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
+ that should be used to run the HMaster process. The principal name should
+ be in the form: user/hostname@DOMAIN. If "_HOST" is used as the hostname
+ portion, it will be replaced with the actual hostname of the running
+ instance.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.keytab.file</name>
+ <value>/etc/security/keytabs/hbase.service.keytab</value>
+ <description>Full path to the kerberos keytab file to use for logging in
+ the configured HRegionServer server principal.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.kerberos.principal</name>
+ <value>hbase/_HOST@EXAMPLE.COM</value>
+ <description>Ex. "hbase/_HOST@EXAMPLE.COM". The kerberos principal name
+ that should be used to run the HRegionServer process. The principal name
+ should be in the form: user/hostname@DOMAIN. If "_HOST" is used as the
+ hostname portion, it will be replaced with the actual hostname of the
+ running instance. An entry for this principal must exist in the file
+ specified in hbase.regionserver.keytab.file
+ </description>
+ </property>
+
+ <!-- Additional configuration specific to HBase security -->
+ <property>
+ <name>hbase.superuser</name>
+ <value>hbase</value>
+ <description>List of users or groups (comma-separated), who are allowed
+ full privileges, regardless of stored ACLs, across the cluster.
+ Only used when HBase security is enabled.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.security.authentication</name>
+ <value>simple</value>
+ <description> Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
+ (no authentication), and 'kerberos'.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.security.authorization</name>
+ <value>false</value>
+ <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.coprocessor.region.classes</name>
+ <value></value>
+ <description>A comma-separated list of Coprocessors that are loaded by
+ default on all tables. For any override coprocessor method, these classes
+ will be called in order. After implementing your own Coprocessor, just put
+ it in HBase's classpath and add the fully qualified class name here.
+ A coprocessor can also be loaded on demand by setting HTableDescriptor.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.coprocessor.master.classes</name>
+ <value></value>
+ <description>A comma-separated list of
+ org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+ loaded by default on the active HMaster process. For any implemented
+ coprocessor methods, the listed classes will be called in order. After
+ implementing your own MasterObserver, just put it in HBase's classpath
+ and add the fully qualified class name here.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.zookeeper.property.clientPort</name>
+ <value>2181</value>
+ <description>Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+ </description>
+ </property>
+
+ <!--
+ The following three properties are used together to create the list of
+ host:peer_port:leader_port quorum servers for ZooKeeper.
+ -->
+ <property>
+ <name>hbase.zookeeper.quorum</name>
+ <value>localhost</value>
+ <description>Comma separated list of servers in the ZooKeeper Quorum.
+ For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+ By default this is set to localhost for local and pseudo-distributed modes
+ of operation. For a fully-distributed setup, this should be set to a full
+ list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+ this is the list of servers which we will start/stop ZooKeeper on.
+ </description>
+ </property>
+ <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+ <property>
+ <name>hbase.zookeeper.useMulti</name>
+ <value>true</value>
+ <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+ This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+ IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+ and will not be downgraded. ZooKeeper versions before 3.4 do not support multi-update and will
+ not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+ </description>
+ </property>
+ <property>
+ <name>zookeeper.znode.parent</name>
+ <value>/hbase-unsecure</value>
+ <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+ files that are configured with a relative path will go under this node.
+ By default, all of HBase's ZooKeeper file path are configured with a
+ relative path, so they will all go under this directory unless changed.
+ </description>
+ </property>
+
+ <property>
+ <name>hbase.defaults.for.version.skip</name>
+ <value>true</value>
+ <description>Disables version verification.</description>
+ </property>
+
+ <property>
+ <name>dfs.domain.socket.path</name>
+ <value>/var/lib/hadoop-hdfs/dn_socket</value>
+ <description>Path to domain socket.</description>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..0175842
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/metainfo.xml
@@ -0,0 +1,56 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HBASE</name>
+ <version>0.98.4.2.3</version>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>redhat5,redhat6,suse11</osFamily>
+ <packages>
+ <package>
+ <name>hbase_2_3_*</name>
+ </package>
+ <package>
+ <name>phoenix_2_3_*</name>
+ </package>
+ </packages>
+ </osSpecific>
+ <osSpecific>
+ <osFamily>ubuntu7,ubuntu12</osFamily>
+ <packages>
+ <package>
+ <name>hbase-2-3-.*</name>
+ </package>
+ <package>
+ <name>phoenix-2-3-.*</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ <requiredServices>
+ <service>ZOOKEEPER</service>
+ </requiredServices>
+
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..b7f88be
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HDFS/metainfo.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HDFS</name>
+ <version>2.7.0.2.3</version>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>redhat5,redhat6,suse11</osFamily>
+ <packages>
+ <package>
+ <name>hadoop_2_3_*</name>
+ </package>
+ <package>
+ <name>snappy</name>
+ </package>
+ <package>
+ <name>snappy-devel</name>
+ </package>
+ <package>
+ <name>lzo</name>
+ </package>
+ <package>
+ <name>hadooplzo_2_3_*</name>
+ </package>
+ <package>
+ <name>hadoop_2_3_*-libhdfs</name>
+ </package>
+ <package>
+ <name>ambari-log4j</name>
+ </package>
+ </packages>
+ </osSpecific>
+
+ <osSpecific>
+ <osFamily>ubuntu7,ubuntu12</osFamily>
+ <packages>
+ <package>
+ <name>hadoop-2-3-.*-client</name>
+ </package>
+ <package>
+ <name>hadoop-2-3-.*-hdfs-datanode</name>
+ </package>
+ <package>
+ <name>hadoop-2-3-.*-hdfs-journalnode</name>
+ </package>
+ <package>
+ <name>hadoop-2-3-.*-hdfs-namenode</name>
+ </package>
+ <package>
+ <name>hadoop-2-3-.*-hdfs-secondarynamenode</name>
+ </package>
+ <package>
+ <name>hadoop-2-3-.*-hdfs-zkfc</name>
+ </package>
+ <package>
+ <name>libsnappy1</name>
+ </package>
+ <package>
+ <name>libsnappy-dev</name>
+ </package>
+ <package>
+ <name>hadooplzo-2-3-.*</name>
+ </package>
+ <package>
+ <name>libhdfs0-2-3-.*</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ </service>
+ </services>
+</metainfo>