Posted to commits@ambari.apache.org by sc...@apache.org on 2015/04/10 20:48:19 UTC

[1/2] ambari git commit: AMBARI-10318 Add and enable 2.3.GlusterFS stack for HDP

Repository: ambari
Updated Branches:
  refs/heads/trunk 83df8c2a9 -> 40b8b2b44


http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HIVE/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HIVE/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HIVE/configuration/webhcat-site.xml
new file mode 100644
index 0000000..a7caae4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HIVE/configuration/webhcat-site.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- 
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!-- The default settings for Templeton. -->
+<!-- Edit templeton-site.xml to change settings for your local -->
+<!-- install. -->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>templeton.port</name>
+    <value>50111</value>
+    <description>The HTTP port for the main server.</description>
+  </property>
+
+  <property>
+    <name>templeton.hadoop.conf.dir</name>
+    <value>/etc/hadoop/conf</value>
+    <description>The path to the Hadoop configuration.</description>
+  </property>
+
+  <property>
+    <name>templeton.jar</name>
+    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
+    <description>The path to the Templeton jar file.</description>
+  </property>
+
+  <property>
+    <name>templeton.libjars</name>
+    <value>/usr/lib/zookeeper/zookeeper.jar</value>
+    <description>Jars to add to the classpath.</description>
+  </property>
+
+  <property>
+    <name>templeton.hadoop</name>
+    <value>/usr/bin/hadoop</value>
+    <description>The path to the Hadoop executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.archive</name>
+    <value>glusterfs:///apps/webhcat/pig.tar.gz</value>
+    <description>The path to the Pig archive.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.path</name>
+    <value>pig.tar.gz/pig/bin/pig</value>
+    <description>The path to the Pig executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hcat</name>
+    <value>/usr/bin/hcat</value>
+    <description>The path to the hcatalog executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.archive</name>
+    <value>glusterfs:///apps/webhcat/hive.tar.gz</value>
+    <description>The path to the Hive archive.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.home</name>
+    <value>hive.tar.gz/hive</value>
+    <description>The path to the Hive home within the tar. Has no effect if templeton.hive.archive is not set.</description>
+  </property>
+
+  <property>
+    <name>templeton.hcat.home</name>
+    <value>hive.tar.gz/hive/hcatalog</value>
+    <description>The path to the HCat home within the tar. Has no effect if templeton.hive.archive is not set.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.path</name>
+    <value>hive.tar.gz/hive/bin/hive</value>
+    <description>The path to the Hive executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.properties</name>
+    <value>hive.metastore.local=false,hive.metastore.uris=thrift://localhost:9083,hive.metastore.sasl.enabled=false</value>
+    <description>Properties to set when running hive.</description>
+  </property>
+
+
+  <property>
+    <name>templeton.zookeeper.hosts</name>
+    <value>localhost:2181</value>
+    <description>ZooKeeper servers, as comma separated host:port pairs</description>
+  </property>
+
+  <property>
+    <name>templeton.storage.class</name>
+    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
+    <description>The class to use as storage</description>
+  </property>
+
+  <property>
+    <name>templeton.override.enabled</name>
+    <value>false</value>
+    <description>Enable the override path in templeton.override.jars</description>
+  </property>
+
+  <property>
+    <name>templeton.streaming.jar</name>
+    <value>glusterfs:///apps/webhcat/hadoop-streaming.jar</value>
+    <description>The path to the Hadoop streaming jar file.</description>
+  </property> 
+
+  <property>
+    <name>templeton.exec.timeout</name>
+    <value>60000</value>
+    <description>Timeout for the Templeton API.</description>
+  </property>
+
+</configuration>
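
A quick way to sanity-check the WebHCat settings above, sketched here for illustration only (not part of the patch): templeton.port is the HTTP port the server listens on, and WebHCat answers a status request under /templeton/v1. The host name below is an assumption; run it against whichever node hosts the WebHCat Server.

    import json
    from urllib.request import urlopen

    WEBHCAT_HOST = "localhost"   # assumption: the WebHCat Server host
    WEBHCAT_PORT = 50111         # templeton.port from webhcat-site.xml above

    def webhcat_status():
        # A 200 response with {"status": "ok", ...} indicates the server is up.
        url = "http://{}:{}/templeton/v1/status".format(WEBHCAT_HOST, WEBHCAT_PORT)
        with urlopen(url, timeout=10) as resp:
            return json.loads(resp.read().decode("utf-8"))

    if __name__ == "__main__":
        print(webhcat_status())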

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..eb4dd3c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HIVE/metainfo.xml
@@ -0,0 +1,85 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <version>0.15.0.2.3</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>hive_2_3_*</name>
+            </package>
+            <package>
+              <name>hive_2_3_*-hcatalog</name>
+            </package>
+            <package>
+              <name>hive_2_3_*-webhcat</name>
+            </package>
+            <package>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>hive-2-3-.*</name>
+            </package>
+            <package>
+              <name>hive-2-3-.*-hcatalog</name>
+            </package>
+            <package>
+              <name>hive-2-3-.*-webhcat</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>
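
The osSpecifics block above maps OS families to package name patterns (hive_2_3_* for the RPM-based families, hive-2-3-.* for Ubuntu). As a rough sketch of how such a block can be matched against a host's family (illustrative only, not Ambari's actual resolver):

    import xml.etree.ElementTree as ET

    def packages_for_os(metainfo_path, os_family):
        # Collect package names from every osSpecific whose osFamily list
        # contains the given family, or the catch-all "any".
        root = ET.parse(metainfo_path).getroot()
        names = []
        for os_specific in root.iter("osSpecific"):
            families = [f.strip() for f in
                        os_specific.findtext("osFamily", default="").split(",")]
            if "any" in families or os_family in families:
                names.extend(p.findtext("name") for p in os_specific.iter("package"))
        return names

    # packages_for_os("metainfo.xml", "redhat6")
    # -> ['mysql-connector-java', 'hive_2_3_*', 'hive_2_3_*-hcatalog', ...]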

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/KAFKA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/KAFKA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/KAFKA/metainfo.xml
new file mode 100644
index 0000000..80ffaf0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/KAFKA/metainfo.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KAFKA</name>
+      <version>0.8.2.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>kafka_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>kafka-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/KERBEROS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/KERBEROS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/KERBEROS/metainfo.xml
new file mode 100644
index 0000000..1a931a3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/KERBEROS/metainfo.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KERBEROS</name>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/KNOX/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/KNOX/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/KNOX/metainfo.xml
new file mode 100644
index 0000000..1265026
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/KNOX/metainfo.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>KNOX</name>
+      <version>0.6.0.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>knox_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>knox-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/MAHOUT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/MAHOUT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/MAHOUT/metainfo.xml
new file mode 100644
index 0000000..6c4ef96
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/MAHOUT/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+            <name>MAHOUT</name>
+            <extends>common-services/MAHOUT/1.0.0.2.3</extends>
+        </service>
+    </services>
+</metainfo>
\ No newline at end of file
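
Unlike the other service definitions in this patch, MAHOUT carries only an <extends> reference, so everything else is inherited from the common-services definition it points to. A minimal sketch of resolving that reference to a directory, assuming the stock resources root of an ambari-server install:

    from pathlib import Path
    import xml.etree.ElementTree as ET

    # Assumption: the default ambari-server resources root.
    RESOURCES = Path("/var/lib/ambari-server/resources")

    def resolve_extends(metainfo_path):
        # common-services/MAHOUT/1.0.0.2.3 -> <resources>/common-services/MAHOUT/1.0.0.2.3
        root = ET.parse(metainfo_path).getroot()
        extends = root.findtext("services/service/extends")
        return RESOURCES / extends if extends else None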

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/OOZIE/metainfo.xml
new file mode 100644
index 0000000..3df7d58
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/OOZIE/metainfo.xml
@@ -0,0 +1,70 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <version>5.0.0.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zip</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <name>extjs</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>oozie_2_3_*</name>
+            </package>
+            <package>
+              <name>falcon_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>oozie-2-3-.*</name>
+            </package>
+            <package>
+              <name>falcon-2-3-.*</name>
+            </package>
+            <package>
+              <name>extjs</name>
+            </package>
+            <package>
+              <name>libxml2-utils</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/PIG/metainfo.xml
new file mode 100644
index 0000000..70b78c6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/PIG/metainfo.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <version>0.15.0.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>datafu</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>pig_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>pig-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/RANGER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/RANGER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/RANGER/metainfo.xml
new file mode 100644
index 0000000..35b76df
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/RANGER/metainfo.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>RANGER</name>
+      <version>0.5.0.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>ranger_2_3_*-admin</name>
+            </package>
+            <package>
+              <name>ranger_2_3_*-usersync</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>ranger-2-3-.*-admin</name>
+            </package>
+            <package>
+              <name>ranger-2-3-.*-usersync</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/SLIDER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/SLIDER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/SLIDER/metainfo.xml
new file mode 100644
index 0000000..09537e2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/SLIDER/metainfo.xml
@@ -0,0 +1,56 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SLIDER</name>
+      <version>0.61.0.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>slider_2_3_*</name>
+            </package>
+            <package>
+              <name>storm_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>slider-2-3-.*</name>
+            </package>
+            <package>
+              <name>storm-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <requiredServices>
+        <service>YARN</service>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+      
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/SPARK/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/SPARK/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/SPARK/metainfo.xml
new file mode 100644
index 0000000..2116c02
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/SPARK/metainfo.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<metainfo>
+    <schemaVersion>2.0</schemaVersion>
+    <services>
+        <service>
+          <name>SPARK</name>
+          <version>1.2.1.2.3</version>
+          <osSpecifics>
+            <osSpecific>
+              <osFamily>redhat5,redhat6,suse11</osFamily>
+              <packages>
+                <package>
+                  <name>spark_2_3_*</name>
+                </package>
+                <package>
+                  <name>spark_2_3_*-python</name>
+                </package>
+              </packages>
+            </osSpecific>
+            <osSpecific>
+              <osFamily>ubuntu7,ubuntu12</osFamily>
+              <packages>
+                <package>
+                  <name>spark-2-3-.*</name>
+                </package>
+                <package>
+                  <name>spark-2-3-.*-python</name>
+                </package>
+              </packages>
+            </osSpecific>
+          </osSpecifics>
+        </service>
+    </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/SQOOP/metainfo.xml
new file mode 100644
index 0000000..f443b85
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/SQOOP/metainfo.xml
@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <version>1.4.6.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>sqoop_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>sqoop-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <requiredServices>
+        <service>GLUSTERFS</service>
+      </requiredServices>      
+      
+    </service>
+  </services>
+</metainfo>
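
The requiredServices block above is what ties SQOOP in this stack to GLUSTERFS rather than HDFS. A hedged sketch of how such a dependency list might be checked against the services selected for a cluster (again illustrative, not Ambari's validation code):

    import xml.etree.ElementTree as ET

    def missing_required_services(metainfo_path, selected_services):
        # selected_services: names chosen for the cluster, e.g. {"SQOOP", "YARN"}
        root = ET.parse(metainfo_path).getroot()
        required = set()
        for block in root.iter("requiredServices"):
            for svc in block.iter("service"):
                required.add(svc.text.strip())
        return sorted(required - set(selected_services))

    # missing_required_services("metainfo.xml", {"SQOOP", "YARN"}) -> ['GLUSTERFS']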

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/STORM/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/STORM/metainfo.xml
new file mode 100644
index 0000000..0666aa5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/STORM/metainfo.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>STORM</name>
+      <version>0.9.3.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>storm_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>storm-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/TEZ/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/TEZ/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/TEZ/metainfo.xml
new file mode 100644
index 0000000..8ccc6f0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/TEZ/metainfo.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>TEZ</name>
+      <version>0.7.0.2.3</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>tez_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>tez-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration-mapred/core-site.xml.2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration-mapred/core-site.xml.2 b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration-mapred/core-site.xml.2
new file mode 100644
index 0000000..3a2af49
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration-mapred/core-site.xml.2
@@ -0,0 +1,20 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
new file mode 100644
index 0000000..671f328
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+
+<!-- GLUSTERFS properties -->
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>glusterfs:///mr-history/tmp</value>
+    <description>
+      Directory where history files are written by MapReduce jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>glusterfs:///mr-history/done</value>
+    <description>
+      Directory where history files are managed by the MR JobHistory Server.
+    </description>
+  </property>
+  <property>
+     <name>yarn.app.mapreduce.am.staging-dir</name>
+     <value>glusterfs:///user</value>
+     <description>
+       The staging dir used while submitting jobs.
+     </description>
+  </property>
+  <property>
+     <name>mapred.healthChecker.script.path</name>
+     <value>glusterfs:///mapred/jobstatus</value>
+   </property>
+  <property>
+     <name>mapred.job.tracker.history.completed.location</name>
+     <value>glusterfs:///mapred/history/done</value>
+  </property>
+
+  <property>
+    <name>mapred.system.dir</name>
+    <value>glusterfs:///mapred/system</value>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.staging.root.dir</name>
+    <value>glusterfs:///user</value>
+  </property>
+
+</configuration>
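
All of the paths above deliberately use the glusterfs:/// scheme so that job history and staging data land on the GlusterFS-backed Hadoop filesystem instead of HDFS (the scheme-to-filesystem mapping itself is expected to come from the GLUSTERFS service's core-site settings, which are not part of this file). As a small illustrative sketch, the properties can be flattened the same way Hadoop's configuration loader sees them:

    import xml.etree.ElementTree as ET

    def load_site_xml(path):
        # Flatten a Hadoop *-site.xml into {name: value}; a repeated name is
        # simply overwritten by its later occurrence.
        props = {}
        for prop in ET.parse(path).getroot().iter("property"):
            props[prop.findtext("name")] = prop.findtext("value")
        return props

    def glusterfs_paths(path):
        return {k: v for k, v in load_site_xml(path).items()
                if v and v.startswith("glusterfs://")}

    # glusterfs_paths("mapred-site.xml")
    # -> {'mapreduce.jobhistory.done-dir': 'glusterfs:///mr-history/done', ...}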

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml.2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml.2 b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml.2
new file mode 100644
index 0000000..6abb71d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration-mapred/mapred-site.xml.2
@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+
+<!-- GLUSTERFS properties -->
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>glusterfs:///mr-history/tmp</value>
+    <description>
+      Directory where history files are written by MapReduce jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>glusterfs:///mr-history/done</value>
+    <description>
+      Directory where history files are managed by the MR JobHistory Server.
+    </description>
+  </property>
+  <property>
+     <name>yarn.app.mapreduce.am.staging-dir</name>
+     <value>glusterfs:///user</value>
+     <description>
+       The staging dir used while submitting jobs.
+     </description>
+  </property>
+  <property>
+     <name>mapred.healthChecker.script.path</name>
+     <value>glusterfs:///mapred/jobstatus</value>
+   </property>
+  <property>
+     <name>mapred.job.tracker.history.completed.location</name>
+     <value>glusterfs:///mapred/history/done</value>
+  </property>
+
+  <property>
+    <name>mapred.system.dir</name>
+    <value>glusterfs:///mapred/system</value>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.staging.root.dir</name>
+    <value>glusterfs:///user</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
new file mode 100644
index 0000000..63d56b0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/capacity-scheduler.xml
@@ -0,0 +1,45 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>yarn.scheduler.capacity.resource-calculator</name>
+    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+    <description></description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.accessible-node-labels</name>
+    <value>*</value>
+    <description></description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.accessible-node-labels.default.capacity</name>
+    <value>-1</value>
+    <description></description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity</name>
+    <value>-1</value>
+    <description></description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.root.default-node-label-expression</name>
+    <value> </value>
+    <description></description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
new file mode 100644
index 0000000..d562246
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,372 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*</value>
+    <description>Classpath for typical applications.</description>
+  </property>
+  <property>
+    <name>hadoop.registry.rm.enabled</name>
+    <value>false</value>
+    <description>
+      Is the registry enabled: does the RM start it up, create the user and system paths, and purge service records when containers, application attempts and applications complete
+    </description>
+  </property>
+  <property>
+    <name>hadoop.registry.zk.quorum</name>
+    <value>localhost:2181</value>
+    <description>
+      List of hostname:port pairs defining the zookeeper quorum binding for the registry
+    </description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.recovery.enabled</name>
+    <value>true</value>
+    <description>Enable the node manager to recover after starting</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.recovery.dir</name>
+    <value>{{yarn_log_dir_prefix}}/nodemanager/recovery-state</value>
+    <description>
+      The local filesystem directory in which the node manager will store
+      state when recovery is enabled.
+    </description>
+  </property>
+  <property>
+    <name>yarn.client.nodemanager-connect.retry-interval-ms</name>
+    <value>10000</value>
+    <description>Time interval between each attempt to connect to NM</description>
+  </property>
+  <property>
+    <name>yarn.client.nodemanager-connect.max-wait-ms</name>
+    <value>60000</value>
+    <description>Max time to wait to establish a connection to NM</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.recovery.enabled</name>
+    <value>true</value>
+    <description>
+      Enable RM to recover state after starting.
+      If true, then yarn.resourcemanager.store.class must be specified.
+    </description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
+    <value>true</value>
+    <description>
+      Enable RM work preserving recovery. This configuration is private to YARN for experimenting with the feature.
+    </description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.store.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+    <description>
+      The class to use as the persistent store.
+      If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore is used,
+      the store is implicitly fenced; meaning a single ResourceManager
+      is able to use the store at any point in time.
+    </description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-address</name>
+    <value>localhost:2181</value>
+    <description>
+      List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
+    </description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-state-store.parent-path</name>
+    <value>/rmstore</value>
+    <description>Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-acl</name>
+    <value>world:anyone:rwcda </value>
+    <description>ACLs to be used for ZooKeeper znodes.</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
+    <value>10000</value>
+    <description>Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such wait period gives RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications.</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.connect.retry-interval.ms</name>
+    <value>30000</value>
+    <description>How often to try connecting to the ResourceManager.</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.connect.max-wait.ms</name>
+    <value>900000</value>
+    <description>Maximum time to wait to establish connection to ResourceManager</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-retry-interval-ms</name>
+    <value>1000</value>
+    <description>"Retry interval in milliseconds when connecting to ZooKeeper.
+      When HA is enabled, the value here is NOT used. It is generated
+      automatically from yarn.resourcemanager.zk-timeout-ms and
+      yarn.resourcemanager.zk-num-retries."
+    </description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-num-retries</name>
+    <value>1000</value>
+    <description>Number of times RM tries to connect to ZooKeeper.</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.zk-timeout-ms</name>
+    <value>10000</value>
+    <description>ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expirations happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat).</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.state-store.max-completed-applications</name>
+    <value>${yarn.resourcemanager.max-completed-applications}</value>
+    <description>The maximum number of completed applications RM state store keeps, less than or equal to ${yarn.resourcemanager.max-completed-applications}. By default, it equals ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance. Typically, a smaller value indicates better performance on RM recovery.</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
+    <value>2000, 500</value>
+    <description>hdfs client retry policy specification. hdfs client retry is always enabled. Specified in pairs of sleep-time and number-of-retries and (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on.</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.fs.state-store.uri</name>
+    <value> </value>
+    <description>URI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class.</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.ha.enabled</name>
+    <value>false</value>
+    <description>enable RM HA or not</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
+    <description>Pre-requisite to use CGroups</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
+    <value>hadoop-yarn</value>
+    <description>Name of the Cgroups hierarchy under which all YARN jobs will be launched</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
+    <value>false</value>
+    <description>If true, YARN will automount the CGroup, however the directory needs to already exist; else, the cgroup should be mounted by the admin</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
+    <value>false</value>
+    <description>Strictly limit CPU resource usage to allocated usage even if spare CPU is available</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.cpu-vcores</name>
+    <value>8</value>
+    <description></description>
+    <display-name>Total NM CPU vCores available to Containers</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>32</maximum>
+    </value-attributes>
+  </property>
+  <property>
+    <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
+    <value>80</value>
+    <description>The amount of CPU allocated for YARN containers - only effective when used with CGroups</description>
+    <display-name>% of Total NM CPU available to Containers</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>100</maximum>
+    </value-attributes>
+  </property>
+  <property>
+    <name>yarn.node-labels.manager-class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager</value>
+    <description>To enable this feature, set this property to "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager".</description>
+  </property>
+  <property>
+    <name>yarn.node-labels.fs-store.retry-policy-spec</name>
+    <value>2000, 500</value>
+    <description></description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
+    <value>1000</value>
+    <description>This is related to disk size on the machines; admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage, but not both. If both are set, the more conservative value will be used.</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
+    <value>90</value>
+    <description>This is related to disk size on the machines; admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage, but not both. If both are set, the more conservative value will be used.</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+    <value>-1</value>
+    <description>Defines how often NMs wake up to upload log files. The default value is -1; by default, the logs will be uploaded when the application is finished. By setting this configuration, logs can be uploaded periodically while the application is running. The minimum rolling-interval-seconds that can be set is 3600.</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.debug-enabled</name>
+    <value>false</value>
+    <description>
+      This configuration is for debug and test purposes.
+      By setting it to true, the lower bound of
+      yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds can be bypassed.</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
+    <value>30</value>
+    <description>This is a temporary solution. The configuration will be deleted once we find a more scalable method to only write a single log file per LRS.</description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
+    <value>true</value>
+    <description></description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
+    <value>10</value>
+    <description></description>
+  </property>
+  <property>
+    <name>yarn.timeline-service.client.max-retries</name>
+    <value>30</value>
+    <description></description>
+  </property>
+  <property>
+    <name>yarn.timeline-service.client.retry-interval-ms</name>
+    <value>1000</value>
+    <description></description>
+  </property>
+  <property>
+    <name>yarn.timeline-service.ttl-enable</name>
+    <value>true</value>
+    <description>
+      Enable age off of timeline store data.
+    </description>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.path</name>
+    <value>/hadoop/yarn/timeline</value>
+    <description>Store file name for leveldb timeline store.</description>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
+    <value>104857600</value>
+    <description>
+      Size of read cache for uncompressed blocks for leveldb timeline store in bytes.
+    </description>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
+    <value>10000</value>
+    <description>
+      Size of cache for recently read entity start times for leveldb timeline store in number of entities.
+    </description>
+  </property>
+  <property>
+    <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
+    <value>10000</value>
+    <description>
+      Size of cache for recently written entity start times for leveldb timeline store in number of entities.
+    </description>
+  </property>
+  <property>
+    <name>yarn.timeline-service.http-authentication.type</name>
+    <value>simple</value>
+    <description>
+      Defines authentication used for the Timeline Server HTTP endpoint.
+      Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME
+    </description>
+  </property>
+  <property>
+    <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
+    <value>true</value>
+    <description></description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
+    <value>false</value>
+    <description>
+      Flag to enable override of the default kerberos authentication filter with
+      the RM authentication filter to allow authentication using delegation
+      tokens(fallback to kerberos if the tokens are missing).
+      Only applicable when the http authentication type is kerberos.
+    </description>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>The default value is 0.0.0.0; when this is set, the service will bind on all interfaces. The intended options are blank and "0.0.0.0" (sans quotes), with blank as the default.</description>
+  </property>
+  <property>
+    <name>yarn.nodemanager.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>The default value is 0.0.0.0; when this is set, the service will bind on all interfaces. The intended options are blank and "0.0.0.0" (sans quotes), with blank as the default.</description>
+  </property>
+  <property>
+    <name>yarn.timeline-service.bind-host</name>
+    <value>0.0.0.0</value>
+    <description>The default value is 0.0.0.0; when this is set, the service will bind on all interfaces. The intended options are blank and "0.0.0.0" (sans quotes), with blank as the default.</description>
+  </property>
+  <property>
+    <name>yarn.node-labels.fs-store.root-dir</name>
+    <value>/system/yarn/node-labels</value>
+    <description></description>
+  </property>
+  <property>
+    <name>yarn.scheduler.minimum-allocation-vcores</name>
+    <value>1</value>
+    <description></description>
+    <display-name>YARN Container Minimum vCores</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site.xml</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>yarn.scheduler.maximum-allocation-vcores</name>
+    <value>8</value>
+    <description></description>
+    <display-name>YARN Container Maximum vCores</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>8</maximum>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site.xml</type>
+        <name>yarn.nodemanager.resource.cpu-vcores</name>
+      </property>
+    </depends-on>
+  </property>
+</configuration>
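A minimal sketch of how a property file in this format can be consumed outside of Ambari, assuming only the Python standard library; the file name and the printed checks are illustrative and not part of the stack, while the property names match the yarn-site.xml hunk above:

import xml.etree.ElementTree as ET

def load_properties(path):
    # Parse an Ambari-style configuration XML into a {name: value} dict.
    props = {}
    for prop in ET.parse(path).getroot().findall("property"):
        name = prop.findtext("name")
        value = prop.findtext("value") or ""
        props[name] = value.strip()
    return props

if __name__ == "__main__":
    props = load_properties("yarn-site.xml")
    # The descriptions above note that only one of the two disk-health-checker
    # thresholds should be set; if both are, the more conservative value wins.
    free_mb = props.get("yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb", "")
    util_pct = props.get("yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage", "")
    if free_mb and util_pct:
        print("note: both disk-health-checker thresholds are set; the more conservative one is used")
    print("timeline store path: %s" % props.get("yarn.timeline-service.leveldb-timeline-store.path"))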

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/metainfo.xml
new file mode 100644
index 0000000..d185ee8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/YARN/metainfo.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <version>2.7.0.2.3</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_2_3_*-yarn</name>
+            </package>
+            <package>
+              <name>hadoop_2_3_*-mapreduce</name>
+            </package>
+            <package>
+              <name>hadoop_2_3_*-hdfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-2-3-.*-yarn</name>
+            </package>
+            <package>
+              <name>hadoop-2-3-.*-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      
+      <requiredServices>
+        <service>GLUSTERFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>yarn-log4j</config-type>
+      </configuration-dependencies>      
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <version>2.7.0.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_2_3_*-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-2-3-.*-mapreduce</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <configuration-dir>configuration-mapred</configuration-dir>
+
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>yarn-env</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-env</config-type>
+        <config-type>mapred-queue-acls</config-type>
+      </configuration-dependencies>      
+    </service>
+
+  </services>
+</metainfo>
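A minimal sketch of summarizing the metainfo.xml structure above (service name, version, osSpecific packages, requiredServices), assuming only the Python standard library; the file name is illustrative:

import xml.etree.ElementTree as ET

def summarize_metainfo(path):
    # Walk the <services>/<service> entries of a stack metainfo.xml and print
    # each service's version, required services, and package name patterns.
    root = ET.parse(path).getroot()
    for svc in root.findall("./services/service"):
        name = svc.findtext("name")
        version = svc.findtext("version")
        required = [s.text for s in svc.findall("./requiredServices/service")]
        packages = [p.text for p in svc.findall(".//package/name")]
        print("%s %s requires=%s packages=%s" % (name, version, required, packages))

if __name__ == "__main__":
    summarize_metainfo("metainfo.xml")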

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..f227abe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <version>3.4.6.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/stack_advisor.py
new file mode 100644
index 0000000..bffbc26
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/stack_advisor.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+class HDP23StackAdvisor(HDP22StackAdvisor):
+  pass
\ No newline at end of file
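The advisor above inherits everything from HDP22StackAdvisor. A minimal, self-contained sketch of that inheritance pattern follows; the base class body and the recommend() method are stand-ins for illustration, not Ambari's actual advisor API:

class HDP22StackAdvisor(object):
    def recommend(self, configurations):
        # Stand-in for the parent advisor: fill in one YARN default.
        configurations.setdefault("yarn-site", {}).setdefault(
            "yarn.scheduler.minimum-allocation-vcores", "1")
        return configurations

class HDP23StackAdvisor(HDP22StackAdvisor):
    # An empty subclass inherits every recommendation from the 2.2 advisor;
    # 2.3.GlusterFS-specific overrides would be added here later.
    pass

if __name__ == "__main__":
    print(HDP23StackAdvisor().recommend({}))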


[2/2] ambari git commit: AMBARI-10318 Add and enable 2.3.GlusterFS stack for HDP

Posted by sc...@apache.org.
AMBARI-10318 Add and enable 2.3.GlusterFS stack for HDP


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/40b8b2b4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/40b8b2b4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/40b8b2b4

Branch: refs/heads/trunk
Commit: 40b8b2b445746143dbea37c8cea5c943c6f3149c
Parents: 83df8c2
Author: Scott Creeley <sc...@redhat.com>
Authored: Fri Apr 10 14:47:03 2015 -0400
Committer: Scott Creeley <sc...@redhat.com>
Committed: Fri Apr 10 14:47:38 2015 -0400

----------------------------------------------------------------------
 .../2.3.GlusterFS/configuration/cluster-env.xml | 107 ++++++
 .../configuration/cluster-env.xml.noversion     |  56 +++
 .../configuration/cluster-env.xml.version       | 107 ++++++
 .../stacks/HDP/2.3.GlusterFS/metainfo.xml       |  23 ++
 .../stacks/HDP/2.3.GlusterFS/repos/repoinfo.xml |  80 ++++
 .../HDP/2.3.GlusterFS/role_command_order.json   |   8 +
 .../ACCUMULO/configuration/accumulo-log4j.xml   | 112 ++++++
 .../services/ACCUMULO/kerberos.json             | 145 ++++++++
 .../services/ACCUMULO/metainfo.xml              |  49 +++
 .../2.3.GlusterFS/services/FALCON/metainfo.xml  |  44 +++
 .../2.3.GlusterFS/services/FLUME/metainfo.xml   |  50 +++
 .../GLUSTERFS/configuration/core-site.xml       |  43 +++
 .../GLUSTERFS/configuration/hadoop-env.xml      | 194 ++++++++++
 .../services/GLUSTERFS/metainfo.xml             |  71 ++++
 .../GLUSTERFS/package/scripts/glusterfs.py      |  29 ++
 .../package/scripts/glusterfs_client.py         |  34 ++
 .../GLUSTERFS/package/scripts/params.py         |  29 ++
 .../GLUSTERFS/package/scripts/service_check.py  |  37 ++
 .../package/templates/glusterfs-env.sh.j2       |  18 +
 .../package/templates/glusterfs.properties.j2   |  36 ++
 .../services/HBASE/configuration/hbase-site.xml | 370 ++++++++++++++++++
 .../2.3.GlusterFS/services/HBASE/metainfo.xml   |  56 +++
 .../2.3.GlusterFS/services/HDFS/metainfo.xml    |  92 +++++
 .../HIVE/configuration/webhcat-site.xml         | 135 +++++++
 .../2.3.GlusterFS/services/HIVE/metainfo.xml    |  85 +++++
 .../2.3.GlusterFS/services/KAFKA/metainfo.xml   |  44 +++
 .../services/KERBEROS/metainfo.xml              |  25 ++
 .../2.3.GlusterFS/services/KNOX/metainfo.xml    |  44 +++
 .../2.3.GlusterFS/services/MAHOUT/metainfo.xml  |  26 ++
 .../2.3.GlusterFS/services/OOZIE/metainfo.xml   |  70 ++++
 .../HDP/2.3.GlusterFS/services/PIG/metainfo.xml |  52 +++
 .../2.3.GlusterFS/services/RANGER/metainfo.xml  |  54 +++
 .../2.3.GlusterFS/services/SLIDER/metainfo.xml  |  56 +++
 .../2.3.GlusterFS/services/SPARK/metainfo.xml   |  53 +++
 .../2.3.GlusterFS/services/SQOOP/metainfo.xml   |  57 +++
 .../2.3.GlusterFS/services/STORM/metainfo.xml   |  45 +++
 .../HDP/2.3.GlusterFS/services/TEZ/metainfo.xml |  46 +++
 .../YARN/configuration-mapred/core-site.xml.2   |  20 +
 .../YARN/configuration-mapred/mapred-site.xml   |  88 +++++
 .../YARN/configuration-mapred/mapred-site.xml.2 |  68 ++++
 .../YARN/configuration/capacity-scheduler.xml   |  45 +++
 .../services/YARN/configuration/yarn-site.xml   | 372 +++++++++++++++++++
 .../2.3.GlusterFS/services/YARN/metainfo.xml    | 104 ++++++
 .../services/ZOOKEEPER/metainfo.xml             |  45 +++
 .../HDP/2.3.GlusterFS/services/stack_advisor.py |  21 ++
 45 files changed, 3345 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml
new file mode 100644
index 0000000..485125c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml
@@ -0,0 +1,107 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <!-- The properties that end in tar_source describe the pattern of where the tar.gz files come from.
+  They will replace {{ hdp_stack_version }} with the "#.#.#.#" value followed by -* (which is the build number in HDP 2.2).
+  When copying those tarballs, Ambari will look up the corresponding tar_destination_folder property to know where it
+  should be copied to.
+  All of the destination folders must begin with glusterfs://
+  Please note that the spaces inside of {{ ... }} are important.
+
+  IMPORTANT: Any properties included here must also be declared in site_properties.js
+
+  -->
+  <!-- Tez tarball is needed by Hive Server when using the Tez execution engine. -->
+  <property>
+    <name>tez_tar_source</name>
+    <value>/usr/hdp/current/tez-client/lib/tez.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>tez_tar_destination_folder</name>
+    <value>glusterfs:///apps/{{ hdp_stack_version }}/tez/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- Hive tarball is needed by WebHCat. -->
+  <property>
+    <name>hive_tar_source</name>
+    <value>/usr/hdp/current/hive-client/hive.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>hive_tar_destination_folder</name>
+    <value>glusterfs:///apps/{{ hdp_stack_version }}/hive/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- Pig tarball is needed by WebHCat. -->
+  <property>
+    <name>pig_tar_source</name>
+    <value>/usr/hdp/current/pig-client/pig.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>pig_tar_destination_folder</name>
+    <value>glusterfs:///apps/{{ hdp_stack_version }}/pig/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- Hadoop Streaming jar is needed by WebHCat. -->
+  <property>
+    <name>hadoop-streaming_tar_source</name>
+    <value>/usr/hdp/current/hadoop-mapreduce-client/hadoop-streaming.jar</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>hadoop-streaming_tar_destination_folder</name>
+    <value>glusterfs:///apps/{{ hdp_stack_version }}/mapreduce/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- Sqoop tarball is needed by WebHCat. -->
+  <property>
+    <name>sqoop_tar_source</name>
+    <value>/usr/hdp/current/sqoop-client/sqoop.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>sqoop_tar_destination_folder</name>
+    <value>glusterfs:///apps/{{ hdp_stack_version }}/sqoop/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- MapReduce2 tarball -->
+  <property>
+    <name>mapreduce_tar_source</name>
+    <value>/usr/hdp/current/hadoop-client/mapreduce.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>mapreduce_tar_destination_folder</name>
+    <value>glusterfs:///apps/{{ hdp_stack_version }}/mapreduce/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+</configuration>
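A minimal sketch of the {{ hdp_stack_version }} substitution described in the comment block above, assuming only the Python standard library; the version string and paths are illustrative, and the real substitution is performed by Ambari:

import posixpath

def render_destination(template, stack_version):
    # Substitute the {{ hdp_stack_version }} placeholder; the spaces inside
    # {{ ... }} matter, so match that exact token.
    return template.replace("{{ hdp_stack_version }}", stack_version)

if __name__ == "__main__":
    # Illustrative values only.
    dest_folder = render_destination(
        "glusterfs:///apps/{{ hdp_stack_version }}/tez/", "2.3.0.0")
    source = "/usr/hdp/current/tez-client/lib/tez.tar.gz"
    print(posixpath.join(dest_folder, posixpath.basename(source)))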

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.noversion
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.noversion b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.noversion
new file mode 100644
index 0000000..d41ff98
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.noversion
@@ -0,0 +1,56 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+    <property>
+        <name>security_enabled</name>
+        <value>false</value>
+        <description>Hadoop Security</description>
+    </property>
+    <property>
+        <name>kerberos_domain</name>
+        <value>EXAMPLE.COM</value>
+        <description>Kerberos realm.</description>
+    </property>
+    <property>
+        <name>ignore_groupsusers_create</name>
+        <value>false</value>
+        <description>Whether to ignore failures on users and group creation</description>
+    </property>
+    <property>
+        <name>smokeuser</name>
+        <value>ambari-qa</value>
+        <property-type>USER</property-type>
+        <description>User executing service checks</description>
+    </property>
+    <property>
+        <name>smokeuser_keytab</name>
+        <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
+        <description>Path to smoke test user keytab file</description>
+    </property>
+    <property>
+        <name>user_group</name>
+        <value>hadoop</value>
+        <property-type>GROUP</property-type>
+        <description>Hadoop user group.</description>
+    </property>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.version
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.version b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.version
new file mode 100644
index 0000000..485125c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/configuration/cluster-env.xml.version
@@ -0,0 +1,107 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <!-- The properties that end in tar_source describe the pattern of where the tar.gz files come from.
+  They will replace {{ hdp_stack_version }} with the "#.#.#.#" value followed by -* (which is the build number in HDP 2.2).
+  When copying those tarballs, Ambari will look up the corresponding tar_destination_folder property to know where it
+  should be copied to.
+  All of the destination folders must begin with glusterfs://
+  Please note that the spaces inside of {{ ... }} are important.
+
+  IMPORTANT: Any properties included here must also be declared in site_properties.js
+
+  -->
+  <!-- Tez tarball is needed by Hive Server when using the Tez execution engine. -->
+  <property>
+    <name>tez_tar_source</name>
+    <value>/usr/hdp/current/tez-client/lib/tez.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>tez_tar_destination_folder</name>
+    <value>glusterfs:///apps/{{ hdp_stack_version }}/tez/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- Hive tarball is needed by WebHCat. -->
+  <property>
+    <name>hive_tar_source</name>
+    <value>/usr/hdp/current/hive-client/hive.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>hive_tar_destination_folder</name>
+    <value>glusterfs:///apps/{{ hdp_stack_version }}/hive/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- Pig tarball is needed by WebHCat. -->
+  <property>
+    <name>pig_tar_source</name>
+    <value>/usr/hdp/current/pig-client/pig.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>pig_tar_destination_folder</name>
+    <value>glusterfs:///apps/{{ hdp_stack_version }}/pig/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- Hadoop Streaming jar is needed by WebHCat. -->
+  <property>
+    <name>hadoop-streaming_tar_source</name>
+    <value>/usr/hdp/current/hadoop-mapreduce-client/hadoop-streaming.jar</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>hadoop-streaming_tar_destination_folder</name>
+    <value>glusterfs:///apps/{{ hdp_stack_version }}/mapreduce/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- Sqoop tarball is needed by WebHCat. -->
+  <property>
+    <name>sqoop_tar_source</name>
+    <value>/usr/hdp/current/sqoop-client/sqoop.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>sqoop_tar_destination_folder</name>
+    <value>glusterfs:///apps/{{ hdp_stack_version }}/sqoop/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+  <!-- MapReduce2 tarball -->
+  <property>
+    <name>mapreduce_tar_source</name>
+    <value>/usr/hdp/current/hadoop-client/mapreduce.tar.gz</value>
+    <description>Source file path that uses dynamic variables and regex to copy the file to HDFS.</description>
+  </property>
+  <property>
+    <name>mapreduce_tar_destination_folder</name>
+    <value>glusterfs:///apps/{{ hdp_stack_version }}/mapreduce/</value>
+    <description>Destination HDFS folder for the file.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/metainfo.xml
new file mode 100644
index 0000000..24eff8e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+	  <active>false</active>
+    </versions>
+    <extends>2.2</extends>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/repos/repoinfo.xml
new file mode 100644
index 0000000..646b2c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/repos/repoinfo.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <latest>http://public-repo-1.hortonworks.com/HDP/hdp_urlinfo.json</latest>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-2.3</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="redhat5">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-2.3</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="suse11">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11sp3/2.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-2.3</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/suse11sp3</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="ubuntu12">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/ubuntu12/2.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-2.3</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/ubuntu12</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+  <os family="ubuntu7">
+    <repo>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/debian6/2.x/updates/2.3.0.0</baseurl>
+      <repoid>HDP-2.3</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/debian6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.20</repoid>
+      <reponame>HDP-UTILS</reponame>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/role_command_order.json
new file mode 100644
index 0000000..ead3dd4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/role_command_order.json
@@ -0,0 +1,8 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"]
+  }
+}
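A minimal sketch of reading the blockedRole-blockedCommand record format documented in the "_comment" entries above, assuming only the Python standard library; the file name is illustrative:

import json

def load_role_command_order(path):
    # Read the blockedRole-blockedCommand dependency map.  With duplicate
    # "_comment" keys, json.load silently keeps only the last one, which is
    # fine here because only the dependency entries matter.
    with open(path) as f:
        data = json.load(f)
    return {k: v for k, v in data.get("general_deps", {}).items()
            if not k.startswith("_comment")}

if __name__ == "__main__":
    for blocked, blockers in load_role_command_order("role_command_order.json").items():
        role, command = blocked.split("-", 1)
        print("%s %s waits for: %s" % (role, command, ", ".join(blockers)))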

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
new file mode 100644
index 0000000..ef119f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/configuration/accumulo-log4j.xml
@@ -0,0 +1,112 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+
+  <property>
+    <name>audit_log_level</name>
+    <value>OFF</value>
+    <description>Log level for audit logging</description>
+  </property>
+
+  <property>
+    <name>monitor_forwarding_log_level</name>
+    <value>WARN</value>
+    <description>Log level for logging forwarded to the Accumulo
+      Monitor</description>
+  </property>
+
+  <property>
+    <name>debug_log_size</name>
+    <value>1000M</value>
+    <description>Size of each debug rolling log file</description>
+  </property>
+
+  <property>
+    <name>debug_num_logs</name>
+    <value>10</value>
+    <description>Number of rolling debug log files to keep</description>
+  </property>
+
+  <property>
+    <name>info_log_size</name>
+    <value>1000M</value>
+    <description>Size of each info rolling log file</description>
+  </property>
+
+  <property>
+    <name>info_num_logs</name>
+    <value>10</value>
+    <description>Number of rolling info log files to keep</description>
+  </property>
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# default logging properties:
+#  by default, log everything at INFO or higher to the console
+log4j.rootLogger=INFO,A1
+
+# hide Jetty junk
+log4j.logger.org.mortbay.log=WARN,A1
+
+# hide "Got brand-new compressor" messages
+log4j.logger.org.apache.hadoop.io.compress=WARN,A1
+log4j.logger.org.apache.accumulo.core.file.rfile.bcfile.Compression=WARN,A1
+
+# hide junk from TestRandomDeletes
+log4j.logger.org.apache.accumulo.test.TestRandomDeletes=WARN,A1
+
+# hide junk from VFS
+log4j.logger.org.apache.commons.vfs2.impl.DefaultFileSystemManager=WARN,A1
+
+# hide almost everything from zookeeper
+log4j.logger.org.apache.zookeeper=ERROR,A1
+
+# hide AUDIT messages in the shell, alternatively you could send them to a different logger
+log4j.logger.org.apache.accumulo.shell.Shell.audit=WARN,A1
+
+# Send most things to the console
+log4j.appender.A1=org.apache.log4j.ConsoleAppender
+log4j.appender.A1.layout.ConversionPattern=%d{ISO8601} [%-8c{2}] %-5p: %m%n
+log4j.appender.A1.layout=org.apache.log4j.PatternLayout
+
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/kerberos.json
new file mode 100644
index 0000000..3a3ecc3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/kerberos.json
@@ -0,0 +1,145 @@
+{
+  "services": [
+    {
+      "name": "ACCUMULO",
+      "identities": [
+        {
+          "name": "accumulo",
+          "principal": {
+            "value": "${accumulo-env/accumulo_user}@${realm}",
+            "type" : "user",
+            "configuration": "accumulo-env/accumulo_principal_name",
+            "local_username": "${accumulo-env/accumulo_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/accumulo.headless.keytab",
+            "owner": {
+              "name": "${accumulo-env/accumulo_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "accumulo-env/accumulo_user_keytab"
+          }
+        },
+        {
+          "name": "accumulo_service",
+          "principal": {
+            "value": "${accumulo-env/accumulo_user}/_HOST@${realm}",
+            "type" : "service",
+            "configuration": "accumulo-site/general.kerberos.principal",
+            "local_username": "${accumulo-env/accumulo_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/accumulo.service.keytab",
+            "owner": {
+              "name": "${accumulo-env/accumulo_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": ""
+            },
+            "configuration": "accumulo-site/general.kerberos.keytab"
+          }
+        },
+        {
+          "name": "accumulo_tracer",
+          "principal": {
+            "value": "tracer@${realm}",
+            "type" : "user",
+            "configuration": "accumulo-site/trace.user",
+            "local_username": "${accumulo-env/accumulo_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/accumulo-tracer.headless.keytab",
+            "owner": {
+              "name": "${accumulo-env/accumulo_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": ""
+            },
+            "configuration": "accumulo-site/trace.token.property.keytab"
+          }
+        },
+        {
+          "name": "/hdfs"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "accumulo-site": {
+            "instance.rpc.sasl.enabled": "true",
+            "instance.security.authenticator": "org.apache.accumulo.server.security.handler.KerberosAuthenticator",
+            "instance.security.authorizor": "org.apache.accumulo.server.security.handler.KerberosAuthorizor",
+            "instance.security.permissionHandler": "org.apache.accumulo.server.security.handler.KerberosPermissionHandler",
+            "trace.token.type": "org.apache.accumulo.core.client.security.tokens.KerberosToken",
+            "general.delegation.token.lifetime": "7d",
+            "general.delegation.token.update.interval": "1d"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "ACCUMULO_MASTER",
+          "identities": [
+            {
+              "name": "./accumulo_service"
+            }
+          ]
+        },
+        {
+          "name": "ACCUMULO_TSERVER",
+          "identities": [
+            {
+              "name": "./accumulo_service"
+            }
+          ]
+        },
+        {
+          "name": "ACCUMULO_MONITOR",
+          "identities": [
+            {
+              "name": "./accumulo_service"
+            },
+            {
+              "name": "./accumulo_tracer"
+            }
+          ]
+        },
+        {
+          "name": "ACCUMULO_GC",
+          "identities": [
+            {
+              "name": "./accumulo_service"
+            }
+          ]
+        },
+        {
+          "name": "ACCUMULO_TRACER",
+          "identities": [
+            {
+              "name": "./accumulo_tracer"
+            }
+          ]
+        },
+        {
+          "name": "ACCUMULO_CLIENT",
+          "identities": [
+            {
+              "name": "./accumulo"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
+
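A minimal sketch of listing the identities and keytab files declared in the kerberos.json above, assuming only the Python standard library; the ${...} placeholders are left unresolved because only Ambari can expand them, and the file name is illustrative:

import json

def list_identities(path):
    # Print each service-level identity's principal value and keytab file;
    # identities that only reference another identity (e.g. "/hdfs") have
    # neither, so fall back to "-".
    with open(path) as f:
        descriptor = json.load(f)
    for service in descriptor.get("services", []):
        for identity in service.get("identities", []):
            principal = identity.get("principal", {}).get("value", "-")
            keytab = identity.get("keytab", {}).get("file", "-")
            print("%s: %s -> %s" % (identity.get("name"), principal, keytab))

if __name__ == "__main__":
    list_identities("kerberos.json")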

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/metainfo.xml
new file mode 100644
index 0000000..1f2c281
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/ACCUMULO/metainfo.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ACCUMULO</name>
+      <version>1.7.0.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>accumulo_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>accumulo-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FALCON/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FALCON/metainfo.xml
new file mode 100644
index 0000000..a4704bd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FALCON/metainfo.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FALCON</name>
+      <version>0.7.0.2.3</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>falcon_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>falcon-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FLUME/metainfo.xml
new file mode 100644
index 0000000..d554ec7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/FLUME/metainfo.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FLUME</name>
+      <version>1.5.2.2.3</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>flume_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>flume-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <requiredServices>
+        <service>GLUSTERFS</service>
+      </requiredServices>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
new file mode 100644
index 0000000..a861b5f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/core-site.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+ 
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- file system properties -->
+
+  <property>
+  <name>fs.AbstractFileSystem.glusterfs.impl</name>
+  <value>org.apache.hadoop.fs.local.GlusterFs</value>
+  </property>
+
+  <property>
+  <name>fs.glusterfs.impl</name>
+  <value>org.apache.hadoop.fs.glusterfs.GlusterFileSystem</value>
+  </property>
+  
+  <property>
+    <name>fs.defaultFS</name>
+    <value>glusterfs:///localhost:8020</value>
+  </property>  
+
+
+</configuration>
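A minimal sketch of checking that the fs.defaultFS scheme in the core-site.xml above has a matching fs.<scheme>.impl registration, assuming only the Python standard library; the file name is illustrative:

import xml.etree.ElementTree as ET

def check_default_fs(path):
    # Confirm that the scheme of fs.defaultFS has a matching fs.<scheme>.impl
    # entry, as in the core-site.xml above.
    props = {p.findtext("name"): (p.findtext("value") or "")
             for p in ET.parse(path).getroot().findall("property")}
    scheme = props.get("fs.defaultFS", "").split(":", 1)[0]
    impl_key = "fs.%s.impl" % scheme
    if impl_key in props:
        print("%s is handled by %s" % (scheme, props[impl_key]))
    else:
        print("no filesystem implementation registered for scheme %r" % scheme)

if __name__ == "__main__":
    check_default_fs("core-site.xml")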

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..bce6b53
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
@@ -0,0 +1,194 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>glusterfs_user</name>
+    <value>root</value>
+    <description>User to run GlusterFS as</description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode.</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User to run HDFS as</description>
+  </property>
+  <!--
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Proxy user group.</description>
+  </property>
+  -->
+    <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>
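
The content property above is a Jinja template for hadoop-env.sh; Ambari fills the {{...}} placeholders (java_home, hadoop_conf_dir, heap sizes, log and PID dir prefixes) from the stack parameters and writes the result to disk. A hedged sketch of that rendering step in the style used by other stack scripts; the target path, ownership, and the params attribute name are assumptions, not part of this file:

from resource_management import *

def write_hadoop_env():
  import params   # assumed to expose the 'content' value above as hadoop_env_sh_template

  File("/etc/hadoop/conf/hadoop-env.sh",                       # assumed target path
       owner="hdfs",                                           # assumed; matches hdfs_user above
       group="hadoop",                                         # assumed group
       content=InlineTemplate(params.hadoop_env_sh_template))  # resolves {{...}} from params in scope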

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/metainfo.xml
new file mode 100644
index 0000000..8bf4eb6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/metainfo.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>GLUSTERFS</name>
+      <displayName>GLUSTERFS</displayName>
+      <comment>A Hadoop Compatible File System</comment>
+      <version>2.1.3.0</version>
+      <components>
+        <component>
+          <name>GLUSTERFS_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/glusterfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>   
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-env.sh</fileName>
+              <dictionaryName>hadoop-env</dictionaryName>
+            </configFile>
+          </configFiles>                 
+        </component>
+      </components>
+<!--
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>glusterfs</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+-->
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>hadoop-env</config-type>
+        <!--<config-type>hdfs-site</config-type>-->
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs.py b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs.py
new file mode 100644
index 0000000..8b64c6a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+
+def glusterfs():
+  import params
+
+  Directory( params.glusterfs_conf_dir
+  )
+
+
+
+
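
glusterfs() only declares the configuration directory, with no ownership or mode. A sketch of a more explicit variant, purely illustrative; the owner, group, and mode values are assumptions and not part of this commit:

from resource_management import *

def glusterfs():
  import params

  Directory(params.glusterfs_conf_dir,
            owner='root',   # assumed: the stack runs GlusterFS as root (glusterfs_user above)
            group='root',   # assumed
            mode=0755)      # assumed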

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs_client.py
new file mode 100644
index 0000000..840c76c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/glusterfs_client.py
@@ -0,0 +1,34 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+from glusterfs import glusterfs
+
+class GlusterFSClient(Script):
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    glusterfs()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  GlusterFSClient().execute()
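
GlusterFSClient implements only configure and status; the package block in the GLUSTERFS metainfo.xml above is commented out, so nothing is installed by this script. Client scripts in other HDP stacks usually also override install; a rough sketch of what that could look like here, as an illustration only and not part of this commit:

from resource_management import *
from glusterfs import glusterfs

class GlusterFSClientWithInstall(Script):

  def install(self, env):
    self.install_packages(env)   # would install whatever the metainfo <packages> resolve to
    self.configure(env)

  def configure(self, env):
    import params
    env.set_params(params)
    glusterfs()

  def status(self, env):
    raise ClientComponentHasNoStatus()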

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/params.py
new file mode 100644
index 0000000..6d88109
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/params.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+
+#glusterfs_home = '/usr/lib/glusterfs'
+glusterfs_conf_dir = '/etc/glusterfs'
+log_dir = '/var/log/glusterfs'
+java64_home = config['hostLevelParams']['java_home']
+hadoop_home = "/usr"

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/service_check.py
new file mode 100644
index 0000000..6619a73
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/scripts/service_check.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class GlusterFSServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    Execute(format("env  GLUSTERFS_LOG_DIR=/var/log/glusterfs "
+                   "GLUSTERFS_PID_DIR=/var/run/glusterfs "
+                   "glusterd --version"),
+            logoutput=True,
+            tries = 3,
+            try_sleep = 20
+    )
+
+if __name__ == "__main__":
+  GlusterFSServiceCheck().execute()
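
The check treats a clean exit from glusterd --version as success and gives up after three attempts, sleeping 20 seconds between them, so a persistent failure surfaces after roughly 40 seconds of waiting. A plain-Python sketch of the retry behaviour the tries/try_sleep arguments are assumed to provide (the Execute resource's exact semantics are not defined in this commit):

import subprocess
import time

def run_with_retries(cmd, tries=3, try_sleep=20):
  for attempt in range(1, tries + 1):
    if subprocess.call(cmd, shell=True) == 0:
      return                       # success, stop retrying
    if attempt < tries:
      time.sleep(try_sleep)        # pause before the next attempt
  raise Exception("command failed after %d tries: %s" % (tries, cmd))

run_with_retries("glusterd --version")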

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs-env.sh.j2
new file mode 100644
index 0000000..1f4c746
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs-env.sh.j2
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
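
This template only exports JAVA_HOME and HADOOP_HOME, both of which resolve from params.py ({{java64_home}} and {{hadoop_home}}). Nothing in this commit renders it; if it were wired up, the conventional approach would be a File resource with a Template source, roughly as below, where the destination path and mode are assumptions:

from resource_management import *

# Sketch only: how a .j2 file under package/templates is normally materialized.
File("/etc/glusterfs/glusterfs-env.sh",            # assumed destination
     content=Template("glusterfs-env.sh.j2"),      # fills {{java64_home}} / {{hadoop_home}} from params
     mode=0755)                                    # assumed mode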

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs.properties.j2
new file mode 100644
index 0000000..1bf6e1d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/GLUSTERFS/package/templates/glusterfs.properties.j2
@@ -0,0 +1,36 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# GlusterFS configuration file. All values can be overwritten by command line arguments.
+
+
+
+# load jarfile, colon separated
+#jar=/usr/lib/hadoop/lib
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+#verbose=true
+
+#exectype local|mapreduce, mapreduce is default
+#exectype=mapreduce
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml
new file mode 100644
index 0000000..cf8ddd3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,370 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>glusterfs:///hbase</value>
+    <description>The directory shared by region servers and into
+    which HBase persists.  The URL should be 'fully-qualified'
+    to include the filesystem scheme.  For example, to specify the
+    HDFS directory '/hbase' where the HDFS instance's namenode is
+    running at namenode.example.org on port 9000, set this value to:
+    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
+    into /tmp.  Change this configuration else all data will be lost
+    on machine restart.
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false for standalone mode and true for distributed mode.  If
+      false, startup will run all HBase and ZooKeeper daemons together
+      in the one JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>60000</value>
+    <description>The port the HBase Master should bind to.</description>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/hadoop/hbase</value>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp' (The '/tmp' directory is often cleared on
+    machine restart).
+    </description>
+  </property>
+  <property>
+    <name>hbase.local.dir</name>
+    <value>${hbase.tmp.dir}/local</value>
+    <description>Directory on the local filesystem to be used as a local storage
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value>0.0.0.0</value>
+    <description>The bind address for the HBase Master web UI
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>60010</value>
+    <description>The port for the HBase Master web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>60030</value>
+    <description>The port for the HBase RegionServer web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.4</value>
+    <description>Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>60</value>
+    <description>Count of RPC Listener instances spun up on RegionServers.
+    Same property is used by the Master for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>86400000</value>
+    <description>The time (in milliseconds) between 'major' compactions of all
+    HStoreFiles in a region.  Default: 1 day.
+    Set to 0 to disable automated major compactions.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.38</value>
+    <description>When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+      This value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>2</value>
+    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
+    times hbase.hregion.flush.size bytes.  Useful for preventing
+    runaway memstore during spikes in update traffic.  Without an
+    upper bound, memstore fills such that when it flushes the
+    resultant flush files take a long time to compact or split, or
+    worse, we OOME.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value>134217728</value>
+    <description>
+    Memstore will be flushed to disk if size of the memstore
+    exceeds this number of bytes.  Value is checked by a thread that runs
+    every hbase.server.thread.wakefrequency.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.mslab.enabled</name>
+    <value>true</value>
+    <description>
+      Enables the MemStore-Local Allocation Buffer,
+      a feature which works to prevent heap fragmentation under
+      heavy write loads. This can reduce the frequency of stop-the-world
+      GC pauses on large heaps.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>10737418240</value>
+    <description>
+    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+    grown to exceed this value, the hosting HRegion is split in two.
+    Default: 1G.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>100</value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values will enable faster scanners but will eat up more memory
+    and some calls of next may take longer and longer times when the cache is empty.
+    Do not set this value such that the time between invocations is greater
+    than the scanner timeout; i.e. hbase.regionserver.lease.period
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>30000</value>
+    <description>ZooKeeper session timeout.
+      HBase passes this to the zk quorum as suggested maximum time for a
+      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+      "The client sends a requested timeout, the server responds with the
+      timeout that it can give the client. " In milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value>10485760</value>
+    <description>Specifies the combined maximum allowed size of a KeyValue
+    instance. This is to set an upper boundary for a single entry saved in a
+    storage file. Since they cannot be split, this helps avoid a region that
+    cannot be split any further because the data is too large. It seems wise
+    to set this to a fraction of the maximum region size. Setting it to zero
+    or less disables the check.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.compactionThreshold</name>
+    <value>3</value>
+    <description>
+    If more than this number of HStoreFiles in any one HStore
+    (one HStoreFile is written per flush of memstore) then a compaction
+    is run to rewrite all HStoreFiles files as one.  Larger numbers
+    put off compaction but when it runs, it takes longer to complete.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.flush.retries.number</name>
+    <value>120</value>
+    <description>
+    The number of times the region flush operation will be retried.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <value>10</value>
+    <description>
+    If more than this number of StoreFiles in any one Store
+    (one StoreFile is written per flush of MemStore) then updates are
+    blocked for this HRegion until a compaction is completed, or
+    until hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.40</value>
+    <description>
+        Percentage of maximum heap (-Xmx setting) to allocate to block cache
+        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+        Set to 0 to disable but it's not recommended.
+    </description>
+  </property>
+
+  <!-- The following properties configure authentication information for
+       HBase processes when using Kerberos security.  There are no default
+       values, included here for documentation purposes -->
+  <property>
+    <name>hbase.master.keytab.file</name>
+    <value>/etc/security/keytabs/hbase.service.keytab</value>
+    <description>Full path to the kerberos keytab file to use for logging in
+    the configured HMaster server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.kerberos.principal</name>
+    <value>hbase/_HOST@EXAMPLE.COM</value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HMaster process.  The principal name should
+    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
+    portion, it will be replaced with the actual hostname of the running
+    instance.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.keytab.file</name>
+    <value>/etc/security/keytabs/hbase.service.keytab</value>
+    <description>Full path to the kerberos keytab file to use for logging in
+    the configured HRegionServer server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.kerberos.principal</name>
+    <value>hbase/_HOST@EXAMPLE.COM</value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HRegionServer process.  The principal name
+    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
+    hostname portion, it will be replaced with the actual hostname of the
+    running instance.  An entry for this principal must exist in the file
+    specified in hbase.regionserver.keytab.file
+    </description>
+  </property>
+
+  <!-- Additional configuration specific to HBase security -->
+  <property>
+    <name>hbase.superuser</name>
+    <value>hbase</value>
+    <description>List of users or groups (comma-separated), who are allowed
+    full privileges, regardless of stored ACLs, across the cluster.
+    Only used when HBase security is enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authentication</name>
+    <value>simple</value>
+    <description>  Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
+      (no authentication), and 'kerberos'.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value></value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+    default on all tables. For any override coprocessor method, these classes
+    will be called in order. After implementing your own Coprocessor, just put
+    it in HBase's classpath and add the fully qualified class name here.
+    A coprocessor can also be loaded on demand by setting HTableDescriptor.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+  <property>
+    <name>hbase.zookeeper.useMulti</name>
+    <value>true</value>
+    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
+    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase-unsecure</value>
+    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+      files that are configured with a relative path will go under this node.
+      By default, all of HBase's ZooKeeper file path are configured with a
+      relative path, so they will all go under this directory unless changed.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+    <description>Disables version verification.</description>
+  </property>
+
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>Path to domain socket.</description>
+  </property>
+
+</configuration>
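
Two of these settings split the RegionServer heap: hbase.regionserver.global.memstore.upperLimit (0.4) plus hfile.block.cache.size (0.40) claim 80% of -Xmx, right at the 0.8 ceiling HBase's startup sanity check enforces, while the lowerLimit of 0.38 is the target that forced flushes drive toward. A small arithmetic sketch for an assumed 8 GB RegionServer heap (the heap size itself is configured in hbase-env, not in this file):

# Sketch: how the fractions above carve up an assumed RegionServer heap.
heap_mb = 8192                      # assumed -Xmx; configured elsewhere (hbase-env)

memstore_upper = 0.4  * heap_mb     # hbase.regionserver.global.memstore.upperLimit -> 3276.8 MB
memstore_lower = 0.38 * heap_mb     # hbase.regionserver.global.memstore.lowerLimit -> 3112.96 MB
block_cache    = 0.40 * heap_mb     # hfile.block.cache.size                        -> 3276.8 MB

print("combined fraction of heap: %.2f" % (0.4 + 0.40))   # 0.80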

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..0175842
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HBASE/metainfo.xml
@@ -0,0 +1,56 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <version>0.98.4.2.3</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>hbase_2_3_*</name>
+            </package>
+            <package>
+              <name>phoenix_2_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>hbase-2-3-.*</name>
+            </package>
+            <package>
+              <name>phoenix-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/40b8b2b4/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..b7f88be
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.GlusterFS/services/HDFS/metainfo.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <version>2.7.0.2.3</version>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>hadoop_2_3_*</name>
+            </package>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <name>lzo</name>
+            </package>
+            <package>
+              <name>hadooplzo_2_3_*</name>
+            </package>
+            <package>
+              <name>hadoop_2_3_*-libhdfs</name>
+            </package>
+            <package>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+
+        <osSpecific>
+          <osFamily>ubuntu7,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-2-3-.*-client</name>
+            </package>
+            <package>
+              <name>hadoop-2-3-.*-hdfs-datanode</name>
+            </package>
+            <package>
+              <name>hadoop-2-3-.*-hdfs-journalnode</name>
+            </package>
+            <package>
+              <name>hadoop-2-3-.*-hdfs-namenode</name>
+            </package>
+            <package>
+              <name>hadoop-2-3-.*-hdfs-secondarynamenode</name>
+            </package>
+            <package>
+              <name>hadoop-2-3-.*-hdfs-zkfc</name>
+            </package>
+            <package>
+              <name>libsnappy1</name>
+            </package>
+            <package>
+              <name>libsnappy-dev</name>
+            </package>
+            <package>
+              <name>hadooplzo-2-3-.*</name>
+            </package>
+            <package>
+              <name>libhdfs0-2-3-.*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+    </service>
+  </services>
+</metainfo>
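
Throughout these metainfo files the redhat/suse package entries use underscore, glob-style names (hadoop_2_3_*, hbase_2_3_*, flume_2_3_*) while the ubuntu entries use dashed, regex-style names (hadoop-2-3-.*); the agent is expected to expand each pattern against what the distro's package manager reports. A rough illustration of the two pattern styles with made-up candidate names; the matching shown here is an assumption about how such patterns behave, not Ambari's actual resolver:

import fnmatch
import re

rpm_candidates = ["hadoop_2_3_0_0_2557", "hadoop_2_3_0_0_2557-libhdfs"]               # hypothetical
deb_candidates = ["hadoop-2-3-0-0-2557-client", "hadoop-2-3-0-0-2557-hdfs-namenode"]  # hypothetical

print([p for p in rpm_candidates if fnmatch.fnmatch(p, "hadoop_2_3_*")])    # glob-style match
print([p for p in deb_candidates if re.match(r"hadoop-2-3-.*", p)])         # regex-style match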