Posted to commits@ambari.apache.org by yu...@apache.org on 2013/11/15 20:12:23 UTC

[02/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 51b01bb..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status, etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    user-to-groups mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank,
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>
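A note on the ACL format used throughout the file above: each value is a comma-separated user list, then a single blank, then a comma-separated group list, with "*" meaning everyone. A minimal Python sketch of how such a value splits; the parse_acl helper and the example names are illustrative, not Hadoop's actual implementation:

def parse_acl(value):
    """Split a hadoop-policy ACL value into (users, groups).

    Returns (None, None) for the "*" wildcard, meaning all users.
    A leading blank (e.g. " hdfs") yields an empty user list.
    """
    if value.strip() == "*":
        return None, None
    users_part, _, groups_part = value.partition(" ")
    users = [u for u in users_part.split(",") if u]
    groups = [g for g in groups_part.split(",") if g]
    return users, groups

print(parse_acl("alice,bob users,wheel"))  # (['alice', 'bob'], ['users', 'wheel'])
print(parse_acl("hadoop"))                 # (['hadoop'], [])
print(parse_acl("*"))                      # (None, None) -> everyone allowed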

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index d1f271e..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,484 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <!-- file system properties -->
-
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>Whether to enable WebHDFS feature</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-  </property>
-
-  <!--
-    <property>
-      <name>dfs.hosts</name>
-      <value>/etc/hadoop/conf/dfs.include</value>
-      <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-    </property>
-  -->
-
-  <property>
-    <name>dfs.namenode.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>${dfs.namenode.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      The default value is the same as dfs.namenode.checkpoint.dir.
-    </description>
-  </property>
-
-
-  <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>67108864</value>
-    <description>The size of the current edit log (in bytes) that triggers
-      a periodic checkpoint even if the maximum checkpoint delay is not reached
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>1.0f</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.namenode.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for balancing purposes, in terms of
-      the number of bytes per second.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-  </property>
-
-  <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.http-address</name>
-    <value>localhost:50070</value>
-    <description>The address and port on which the NameNode
-      web UI listens.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>1024</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow Queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.namenode.secondary.http-address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per the Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the secondary namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.https-address</name>
-    <value>localhost:50470</value>
-    <description>The HTTPS address on which the NameNode binds.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be set on dfs.datanode.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.datanode.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL for who can view the default servlets in HDFS.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes is greater
-      than this ratio, stop avoiding writing to stale nodes so as to prevent hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>A datanode is marked stale after no heartbeat has been received for this interval, in milliseconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.http-address</name>
-    <value>0.0.0.0:8480</value>
-    <description>The address and port the JournalNode web UI listens on.
-      If the port is 0 then the server will start on a free port. </description>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.edits.dir</name>
-    <value>/grid/0/hdfs/journal</value>
-    <description>The path where the JournalNode daemon will store its local state. </description>
-  </property>
-
-  <!-- HDFS Short-Circuit Local Reads -->
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>
-      This configuration parameter turns on short-circuit local reads.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check.</description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
-    <value>4096</value>
-    <description>
-      The DFSClient maintains a cache of recently opened file descriptors. This
-      parameter controls the size of that cache. Setting this higher will use
-      more file descriptors, but potentially provide better performance on
-      workloads involving lots of seeks.
-    </description>
-  </property>
-
-</configuration>
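For readability, the raw byte and second counts in the deleted hdfs-site.xml above translate into conventional units as follows; a small Python sketch, with the property names and values copied from the diff and only the unit conversions added:

raw = {
    "dfs.blocksize": 134217728,                       # bytes
    "dfs.datanode.du.reserved": 1073741824,           # bytes
    "dfs.namenode.checkpoint.period": 21600,          # seconds
    "dfs.datanode.balance.bandwidthPerSec": 6250000,  # bytes/second
}

print(raw["dfs.blocksize"] / 2**20, "MiB block size")                           # 128.0
print(raw["dfs.datanode.du.reserved"] / 2**30, "GiB reserved per volume")       # 1.0
print(raw["dfs.namenode.checkpoint.period"] / 3600, "hours between checkpoints")  # 6.0
print(raw["dfs.datanode.balance.bandwidthPerSec"] * 8 / 1e6, "Mbit/s balancer cap")  # 50.0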

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/metainfo.xml
deleted file mode 100644
index 19ac76b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>2.1.0.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-        
-        <component>
-            <name>JOURNALNODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-          <name>ZKFC</name>
-          <category>SLAVE</category>
-        </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-      <config-type>global</config-type>
-      <config-type>hdfs-site</config-type>
-      <config-type>hadoop-policy</config-type>
-    </configuration-dependencies>
-</metainfo>
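The metainfo.xml deleted above is what Ambari parses to learn a service's components and their categories. A minimal stand-alone sketch of reading such a file with Python's standard library; the local file path is an assumption:

import xml.etree.ElementTree as ET

tree = ET.parse("metainfo.xml")  # assumed local copy of the file above
for component in tree.getroot().iter("component"):
    print(component.findtext("category"), component.findtext("name"))
# Expected output for the HDFS metainfo above:
#   MASTER NAMENODE, SLAVE DATANODE, MASTER SECONDARY_NAMENODE,
#   CLIENT HDFS_CLIENT, MASTER JOURNALNODE, SLAVE ZKFC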

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 2a3f5d4..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,260 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc</value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value> </value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-      Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-      thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-      string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma-separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>Controls which SemanticAnalyzerFactory implementation class is used by the CLI.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>The Hive client authorization manager class name.
-      The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.</description>
-  </property>
-
-  <property>
-    <name>hive.security.metastore.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
-    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization that converts a common
-      join into a mapjoin based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Whether the join will be automatically converted to a sort-merge join if the joined tables pass
-      the criteria for a sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization that converts a common join into a mapjoin based on the input file
-      size. If this parameter is on, and the sum of the sizes of n-1 of the tables/partitions of an n-way join is smaller than the
-      specified size, the join is directly converted to a mapjoin (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of the sizes of n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
-      converted to a mapjoin (there is no conditional task). The default is 10MB.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
-    <description>Reduce deduplication merges two RSs (reduce sink operators) by moving the key/parts/reducer-num of the child RS to the parent RS.
-      That means that if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can produce a very slow, single MR job.
-      The optimization will be disabled if the number of reducers is less than the specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and if there are map-join jobs followed by a map-reduce
-      job (e.g., a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      How many values for each key in the map-joined table should be
-      cached in memory.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.execution.enabled</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.index.filter</name>
-    <value>true</value>
-    <description>
-      Whether to enable automatic use of indexes
-    </description>
-  </property>
-
-</configuration>
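Note that javax.jdo.option.ConnectionURL above carries only the placeholder value "jdbc"; at deploy time it is filled in with a full JDBC URL matching the MySQL driver named in javax.jdo.option.ConnectionDriverName. A Python sketch of the usual shape of that URL; the host, port, and database name are illustrative assumptions, not values from this diff:

host, port, database = "metastore-db.example.com", 3306, "hive"
connection_url = f"jdbc:mysql://{host}:{port}/{database}?createDatabaseIfNotExist=true"
print(connection_url)
# jdbc:mysql://metastore-db.example.com:3306/hive?createDatabaseIfNotExist=true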

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/metainfo.xml
deleted file mode 100644
index ca91fc2..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.12.0.2.0.6.0</version>
-
-    <components>        
-        <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>hive-site</config-type>
-    </configuration-dependencies>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/container-executor.cfg
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/container-executor.cfg b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/container-executor.cfg
deleted file mode 100644
index 502ddaa..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/container-executor.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
-yarn.nodemanager.linux-container-executor.group=hadoop
-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
-banned.users=hdfs,bin,0
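The TODO-YARN-LOCAL-DIR and TODO-YARN-LOG-DIR markers above are placeholders substituted at deploy time. A minimal Python sketch of that substitution; the directory values are illustrative assumptions, not taken from this diff:

template = (
    "yarn.nodemanager.local-dirs={local_dirs}\n"
    "yarn.nodemanager.linux-container-executor.group=hadoop\n"
    "yarn.nodemanager.log-dirs={log_dirs}\n"
    "banned.users=hdfs,bin,0\n"
)
print(template.format(local_dirs="/hadoop/yarn/local",
                      log_dirs="/var/log/hadoop/yarn"))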

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/global.xml
deleted file mode 100644
index ceedd56..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/global.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hs_host</name>
-    <value></value>
-    <description>History Server.</description>
-  </property>
-  <property>
-    <name>mapred_log_dir_prefix</name>
-    <value>/var/log/hadoop-mapreduce</value>
-    <description>Mapreduce Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_pid_dir_prefix</name>
-    <value>/var/run/hadoop-mapreduce</value>
-    <description>Mapreduce PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>Mapreduce User</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
deleted file mode 100644
index de11867..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
+++ /dev/null
@@ -1,379 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- i/o properties -->
-
-  <property>
-    <name>mapreduce.task.io.sort.mb</name>
-    <value>200</value>
-    <description>
-      The total amount of buffer memory to use while sorting files, in megabytes.
-      By default, gives each merge stream 1MB, which should minimize seeks.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.sort.spill.percent</name>
-    <value>0.7</value>
-    <description>
-      The soft limit in the serialization buffer. Once reached, a thread will
-      begin to spill the contents to disk in the background. Note that
-      collection will not block if this threshold is exceeded while a spill
-      is already in progress, so spills may be larger than this threshold when
-      it is set to less than .5
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.task.io.sort.factor</name>
-    <value>100</value>
-    <description>
-      The number of streams to merge at once while sorting files.
-      This determines the number of open file handles.
-    </description>
-  </property>
-
-  <!-- map/reduce properties -->
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-    <description>
-      Administrators for MapReduce applications.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.parallelcopies</name>
-    <value>30</value>
-    <description>
-      The default number of parallel transfers run by reduce during
-      the copy(shuffle) phase.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some map tasks
-      may be executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some reduce tasks may be
-      executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
-    <value>0.05</value>
-    <description>
-      Fraction of the number of maps in the job which should be complete before
-      reduces are scheduled for the job.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>
-      The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapreduce.reduce.shuffle.input.buffer.percent.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>
-      The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.output.compress.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress.type</name>
-    <value>BLOCK</value>
-    <description>
-      If the job outputs are to be compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>
-      The percentage of memory, relative to the maximum heap size, to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-  </property>
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapreduce.map.output.compress</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>mapreduce.task.timeout</name>
-    <value>300000</value>
-    <description>
-      The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Reduce task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The keytab for the job history server principal.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.shuffle.port</name>
-    <value>13562</value>
-    <description>
-      Default port that the ShuffleHandler will run on.
-      ShuffleHandler is a service run at the NodeManager to facilitate
-      transfers of intermediate Map outputs to requesting Reducers.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-  </property>
-
-  <property>       
-    <name>mapreduce.jobhistory.address</name>
-    <value>localhost:10020</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>       
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>localhost:19888</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.framework.name</name>
-    <value>yarn</value>
-    <description>
-      The runtime framework for executing MapReduce jobs. Can be one of local,
-      classic or yarn.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>/user</value>
-    <description>
-      The staging dir used while submitting jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.resource.mb</name>
-    <value>512</value>
-    <description>The amount of memory the MR AppMaster needs.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.command-opts</name>
-    <value>-Xmx312m</value>
-    <description>
-      Java opts for the MR App Master processes.
-      The following symbol, if present, will be interpolated: @taskid@ is replaced
-      by the current TaskID. Any other occurrences of '@' will go unchanged.
-      For example, to enable verbose gc logging to a file named for the taskid in
-      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>
-      Java opts for the MR App Master processes for admin purposes.
-      It will appear before the opts set by yarn.app.mapreduce.am.command-opts and
-      thus its options can be overridden by the user.
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.log.level</name>
-    <value>INFO</value>
-    <description>MR App Master process log level.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.env</name>
-    <value></value>
-    <description>
-      User added environment variables for the MR App Master
-      processes. Example :
-      1) A=foo  This will set the env variable A to foo
-      2) B=$B:c This will inherit the tasktracker's B env variable.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-  </property>
-
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-  </property>
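
Extra libraries become visible to MR applications by appending entries to the
value above; a sketch, where /usr/lib/custom is a hypothetical directory:

    <property>
      <name>mapreduce.application.classpath</name>
      <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*,/usr/lib/custom/*</value>
    </property>
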
-
-  <property>
-    <name>mapreduce.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It is an
-      application-specific setting; it should not be larger than the global number
-      set by the ResourceManager, or it will be overridden. The default is
-      2, to allow at least one retry for the AM.
-    </description>
-  </property>
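
The global cap referenced in the description is set on the ResourceManager
side; assuming the stock YARN property name, keeping the two consistent in
yarn-site.xml looks like:

    <property>
      <name>yarn.resourcemanager.am.max-attempts</name>
      <value>2</value>
    </property>
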
-
-  <property>
-    <name>mapreduce.map.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of maps.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of reduces.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the map task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the reduce task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
-    <description>
-      Additional execution environment entries for map and reduce task processes.
-      This is not an additive property. You must preserve the original value if
-      you want your map and reduce tasks to have access to native libraries (compression, etc.).
-    </description>
-  </property>
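
The backtick expression in the value runs "$JAVA_HOME/bin/java -d32 -version"
to test for a 32-bit JVM and picks the matching native-library directory. On
a cluster known to run only 64-bit JVMs, an equivalent static value (same
paths as above, no probe) would be:

    <property>
      <name>mapreduce.admin.user.env</name>
      <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/Linux-amd64-64</value>
    </property>
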
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress</name>
-    <value>false</value>
-    <description>
-      Should the job outputs be compressed?
-    </description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/metainfo.xml
deleted file mode 100644
index 069873a..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/metainfo.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop NextGen MapReduce (client libraries)</comment>
-    <version>2.1.0.2.0.6.0</version>
-    <components>
-        <component>
-            <name>HISTORYSERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MAPREDUCE2_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-      <config-type>global</config-type>
-      <config-type>mapred-site</config-type>
-      <config-type>mapred-queue-acls</config-type>
-    </configuration-dependencies>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 76471cf..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.5.0</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 28529ff..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,313 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration>
-
-  <!--
-      Refer to the oozie-default.xml file for the complete list of
-      Oozie configuration properties and their default values.
-  -->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-  </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-      The Oozie system ID.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.systemmode</name>
-    <value>NORMAL</value>
-    <description>
-      System mode for Oozie at startup.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.AuthorizationService.security.enabled</name>
-    <value>true</value>
-    <description>
-      Specifies whether security (user name/admin role) is enabled or not.
-      If disabled, any user can manage the Oozie system and any job.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.older.than</name>
-    <value>30</value>
-    <description>
-      Jobs older than this value, in days, will be purged by the PurgeService.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.purge.interval</name>
-    <value>3600</value>
-    <description>
-      Interval at which the purge service will run, in seconds.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.queue.size</name>
-    <value>1000</value>
-    <description>Max callable queue size</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.threads</name>
-    <value>10</value>
-    <description>Number of threads used for executing callables</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.callable.concurrency</name>
-    <value>3</value>
-    <description>
-      Maximum concurrency for a given callable type.
-      Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc.).
-      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-      All commands that use action executors (action-start, action-end, action-kill and action-check) use
-      the action type as the callable type.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.coord.normal.default.timeout</name>
-    <value>120</value>
-    <description>Default timeout for a coordinator action input check (in minutes) for a normal job;
-      -1 means an infinite timeout.</description>
-  </property>
-
-  <property>
-    <name>oozie.db.schema.name</name>
-    <value>oozie</value>
-    <description>
-      Oozie database name.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-    <value> </value>
-    <description>
-      Whitelisted job tracker for Oozie service.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.authentication.type</name>
-    <value>simple</value>
-    <description>
-      Authentication type for Oozie HTTP endpoints: simple or kerberos.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-    <value> </value>
-    <description>
-      Whitelisted NameNode for Oozie service.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.WorkflowAppService.system.libpath</name>
-    <value>/user/${user.name}/share/lib</value>
-    <description>
-      System library path to use for workflow applications.
-      This path is added to a workflow application if its job properties set
-      the property 'oozie.use.system.libpath' to true.
-    </description>
-  </property>
-
-  <property>
-    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-    <value>false</value>
-    <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, so users do not need to specify where
-      the Pig JAR files are. Instead, the ones from the system library path
-      are used.
-    </description>
-  </property>
-  <property>
-    <name>oozie.authentication.kerberos.name.rules</name>
-    <value>
-      RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-      RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-      RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-      RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-      DEFAULT
-    </value>
-    <description>The mapping from kerberos principal names to local OS user names.</description>
-  </property>
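
To make the rule syntax concrete: with a hypothetical realm EXAMPLE.COM and
local user mapred substituted for the TODO placeholders, the first rule would
read

    RULE:[2:$1@$0]([jt]t@.*EXAMPLE.COM)s/.*/mapred/

which maps two-component principals such as jt/host1@EXAMPLE.COM or
tt/host1@EXAMPLE.COM to the local OS user mapred.
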
-  <property>
-    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-    <value>*=/etc/hadoop/conf</value>
-    <description>
-      Comma-separated AUTHORITY=HADOOP_CONF_DIR pairs, where AUTHORITY is the HOST:PORT of
-      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
-      the Oozie configuration directory; the path can also be absolute (e.g. to point
-      to Hadoop client conf/ directories in the local filesystem).
-    </description>
-  </property>
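
A multi-entry sketch of this property, where namenode.example.com is a
hypothetical authority:

    <property>
      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
      <value>*=/etc/hadoop/conf,namenode.example.com:8020=/etc/hadoop/conf-secure</value>
    </property>
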
-  <property>
-    <name>oozie.service.ActionService.executor.ext.classes</name>
-    <value>
-      org.apache.oozie.action.email.EmailActionExecutor,
-      org.apache.oozie.action.hadoop.HiveActionExecutor,
-      org.apache.oozie.action.hadoop.ShellActionExecutor,
-      org.apache.oozie.action.hadoop.SqoopActionExecutor,
-      org.apache.oozie.action.hadoop.DistcpActionExecutor
-    </value>
-  </property>
-
-  <property>
-    <name>oozie.service.SchemaService.wf.ext.schemas</name>
-    <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd</value>
-  </property>
-  <property>
-    <name>oozie.service.JPAService.create.db.schema</name>
-    <value>false</value>
-    <description>
-      Creates the Oozie DB.
-
-      If set to true, the DB schema is created if it does not exist; if it already exists, this is a NOP.
-      If set to false, the DB schema is not created, and startup fails if the schema does not exist.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.driver</name>
-    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-    <description>
-      JDBC driver class.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.url</name>
-    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-    <description>
-      JDBC URL.
-    </description>
-  </property>
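
For a non-embedded database the driver and URL change together; a MySQL
sketch (db.example.com is a placeholder, and the MySQL driver jar must be on
Oozie's classpath):

    <property>
      <name>oozie.service.JPAService.jdbc.driver</name>
      <value>com.mysql.jdbc.Driver</value>
    </property>
    <property>
      <name>oozie.service.JPAService.jdbc.url</name>
      <value>jdbc:mysql://db.example.com:3306/${oozie.db.schema.name}</value>
    </property>
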
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.username</name>
-    <value>oozie</value>
-    <description>
-      Database user name to use to connect to the database
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.password</name>
-    <value> </value>
-    <description>
-      DB user password.
-
-      IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-      and if it ends up empty, Configuration assumes it is NULL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.pool.max.active.conn</name>
-    <value>10</value>
-    <description>
-      Max number of connections.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.services</name>
-    <value>
-      org.apache.oozie.service.SchedulerService,
-      org.apache.oozie.service.InstrumentationService,
-      org.apache.oozie.service.CallableQueueService,
-      org.apache.oozie.service.UUIDService,
-      org.apache.oozie.service.ELService,
-      org.apache.oozie.service.AuthorizationService,
-      org.apache.oozie.service.UserGroupInformationService,
-      org.apache.oozie.service.HadoopAccessorService,
-      org.apache.oozie.service.URIHandlerService,
-      org.apache.oozie.service.MemoryLocksService,
-      org.apache.oozie.service.DagXLogInfoService,
-      org.apache.oozie.service.SchemaService,
-      org.apache.oozie.service.LiteWorkflowAppService,
-      org.apache.oozie.service.JPAService,
-      org.apache.oozie.service.StoreService,
-      org.apache.oozie.service.CoordinatorStoreService,
-      org.apache.oozie.service.SLAStoreService,
-      org.apache.oozie.service.DBLiteWorkflowStoreService,
-      org.apache.oozie.service.CallbackService,
-      org.apache.oozie.service.ActionService,
-      org.apache.oozie.service.ActionCheckerService,
-      org.apache.oozie.service.RecoveryService,
-      org.apache.oozie.service.PurgeService,
-      org.apache.oozie.service.CoordinatorEngineService,
-      org.apache.oozie.service.BundleEngineService,
-      org.apache.oozie.service.DagEngineService,
-      org.apache.oozie.service.CoordMaterializeTriggerService,
-      org.apache.oozie.service.StatusTransitService,
-      org.apache.oozie.service.PauseTransitService,
-      org.apache.oozie.service.GroupsService,
-      org.apache.oozie.service.ProxyUserService
-    </value>
-    <description>List of Oozie services</description>
-  </property>
-  <property>
-    <name>oozie.service.URIHandlerService.uri.handlers</name>
-    <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
-    <description>
-      The URI handlers supported for data availability checks.
-    </description>
-  </property>
-  <property>
-    <name>oozie.services.ext</name>
-    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService</value>
-    <description>
-      To add/replace services defined in 'oozie.services' with custom implementations.
-      Class names must be separated by commas.
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.coord.push.check.requeue.interval</name>
-    <value>30000</value>
-    <description>
-      Command re-queue interval for push dependencies (in milliseconds).
-    </description>
-  </property>
-  <property>
-    <name>oozie.credentials.credentialclasses</name>
-    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
-    <description>
-      Credential Class to be used for HCat.
-    </description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/metainfo.xml
deleted file mode 100644
index 515e669..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.</comment>
-    <version>4.0.0.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>oozie-site</config-type>
-    </configuration-dependencies>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overridden by pig.properties and command-line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose: print all log messages to screen (default is to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about the script into the hadoop job conf
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
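
The two reducer-estimation parameters above combine roughly as
min(pig.exec.reducers.max, total input bytes / pig.exec.reducers.bytes.per.reducer);
a worked example in properties-comment form:

    # with the defaults above, a 10 GB input yields
    # min(999, 10000000000 / 1000000000) = 10 reducers
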

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/metainfo.xml
deleted file mode 100644
index 44e9cda..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.12.0.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/SQOOP/metainfo.xml
deleted file mode 100644
index 9a50700..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.4.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 775632f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,129 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
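
When set, this value is a comma-separated list of key=value pairs passed to
Hive; a sketch with a hypothetical metastore host:

    <property>
      <name>templeton.hive.properties</name>
      <value>hive.metastore.local=false,hive.metastore.uris=thrift://metastore.example.com:9083</value>
    </property>
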
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma-separated host:port pairs</description>
-  </property>
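
For a real ensemble this becomes a list; a sketch with hypothetical hosts:

    <property>
      <name>templeton.zookeeper.hosts</name>
      <value>zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181</value>
    </property>
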
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-    <name>templeton.override.enabled</name>
-    <value>false</value>
-    <description>
-      Enable the override path in templeton.override.jars
-    </description>
-  </property>
-
-  <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API, in milliseconds.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index 91267e4..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>WebHCat (Templeton), a REST API for HCatalog</comment>
-    <version>0.12.0.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>