Posted to commits@ambari.apache.org by yu...@apache.org on 2013/11/15 20:12:22 UTC

[01/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

Updated Branches:
  refs/heads/trunk 6ccdcc836 -> d3e1eab5b


http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/capacity-scheduler.xml
deleted file mode 100644
index ccfb779..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,120 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.2</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run
-      application masters, i.e., it controls the number of concurrently
-      running applications.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at this level (root is the root queue).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.capacity</name>
-    <value>100</value>
-    <description>
-      The total capacity as a percentage out of 100 for this queue.
-      If it has child queues then this includes their capacity as well.
-      The child queues' capacities should add up to their parent queue's
-      capacity or less.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit, a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
-    <value>*</value>
-    <description>
-      The ACL for who can administer this queue, i.e., change sub-queue
-      allocations.
-    </description>
-  </property>
-  
-  <property>
-    <name>yarn.scheduler.capacity.root.unfunded.capacity</name>
-    <value>50</value>
-    <description>
-      No description
-    </description>
-  </property>
-
-</configuration>
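
For context, the capacity model this file configured is percentage-based:
each queue's capacity is a share of its parent, and the description above
says child queues should add up to their parent queue's capacity or less.
A minimal Java sketch of that invariant (hypothetical names, not Ambari or
YARN code):

    import java.util.Map;

    public final class QueueCapacityCheck {

        // Children of one parent queue may not sum past 100 percent of the
        // parent's share, per the root.capacity description above.
        static void checkSiblings(String parent, Map<String, Float> children) {
            float sum = 0f;
            for (float pct : children.values()) {
                sum += pct;
            }
            if (sum > 100f) {
                throw new IllegalArgumentException(
                    "children of " + parent + " sum to " + sum + " > 100");
            }
        }

        public static void main(String[] args) {
            // The deleted file defines a single child "default" at 100 percent.
            checkSiblings("root", Map.of("default", 100f));
        }
    }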

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/container-executor.cfg
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/container-executor.cfg b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/container-executor.cfg
deleted file mode 100644
index 502ddaa..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/container-executor.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
-yarn.nodemanager.linux-container-executor.group=hadoop
-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
-banned.users=hdfs,bin,0
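
For context, this file is a flat key=value list with '#' comment lines,
read by the native container-executor binary. A minimal Java sketch of a
reader for that format (illustrative only, not the actual parser; the path
in main is an assumed default):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.HashMap;
    import java.util.Map;

    public final class CfgReader {

        // Reads the flat key=value format shown above; '#' lines are comments.
        public static Map<String, String> read(Path path) throws IOException {
            Map<String, String> props = new HashMap<>();
            for (String line : Files.readAllLines(path)) {
                String trimmed = line.trim();
                if (trimmed.isEmpty() || trimmed.startsWith("#")) {
                    continue;
                }
                int eq = trimmed.indexOf('=');
                if (eq > 0) {
                    props.put(trimmed.substring(0, eq), trimmed.substring(eq + 1));
                }
            }
            return props;
        }

        public static void main(String[] args) throws IOException {
            // Assumed install location, matching the stack defaults.
            Map<String, String> cfg =
                read(Path.of("/etc/hadoop/conf/container-executor.cfg"));
            System.out.println(cfg.get("banned.users")); // e.g. hdfs,bin,0
        }
    }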

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/global.xml
deleted file mode 100644
index edd1636..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/global.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>rm_host</name>
-    <value></value>
-    <description>ResourceManager.</description>
-  </property>
-  <property>
-    <name>nm_hosts</name>
-    <value></value>
-    <description>List of NodeManager Hosts.</description>
-  </property>
-  <property>
-    <name>yarn_log_dir_prefix</name>
-    <value>/var/log/hadoop-yarn</value>
-    <description>YARN Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_pid_dir_prefix</name>
-    <value>/var/run/hadoop-yarn</value>
-    <description>YARN PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_user</name>
-    <value>yarn</value>
-    <description>YARN User</description>
-  </property>
-  <property>
-    <name>yarn_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>resourcemanager_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>nodemanager_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
-  </property>
-</configuration>
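
The heapsize values above are plain MB numbers; the generated environment
scripts turn them into JVM options. A tiny sketch of the assumed mapping
(illustrative, not the actual Ambari-generated script logic):

    public final class HeapFlag {

        // Turns an Ambari heapsize value (plain MB, e.g. yarn_heapsize=1024)
        // into the JVM option the launched daemon would receive.
        static String xmxFlag(int heapsizeMb) {
            return "-Xmx" + heapsizeMb + "m";
        }

        public static void main(String[] args) {
            System.out.println(xmxFlag(1024)); // -Xmx1024m, matching the defaults above
        }
    }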

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/yarn-site.xml
deleted file mode 100644
index 05e23a9..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/yarn-site.xml
+++ /dev/null
@@ -1,326 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- ResourceManager -->
-
-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>localhost</value>
-    <description>The hostname of the RM.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.resource-tracker.address</name>
-    <value>localhost:8025</value>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.address</name>
-    <value>localhost:8030</value>
-    <description>The address of the scheduler interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.address</name>
-    <value>localhost:8050</value>
-    <description>
-      The address of the applications manager interface in the
-      RM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.admin.address</name>
-    <value>localhost:8141</value>
-    <description>The address of the RM admin interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
-    <description>The class to use as the resource scheduler.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.minimum-allocation-mb</name>
-    <value>512</value>
-    <description>
-      The minimum allocation for every container request at the RM,
-      in MBs. Memory requests lower than this won't take effect,
-      and the specified value will get allocated at minimum.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.maximum-allocation-mb</name>
-    <value>2048</value>
-    <description>
-      The maximum allocation for every container request at the RM,
-      in MBs. Memory requests higher than this won't take effect,
-      and will get capped to this value.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.acl.enable</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>yarn.admin.acl</name>
-    <value>*</value>
-  </property>
-
-  <!-- NodeManager -->
-
-  <property>
-    <name>yarn.nodemanager.address</name>
-    <value>0.0.0.0:45454</value>
-    <description>The address of the container manager in the NM.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>5120</value>
-    <description>Amount of physical memory, in MB, that can be allocated
-      for containers.</description>
-  </property>
-
-  <property>
-    <name>yarn.application.classpath</name>
-    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
-    <description>Classpath for typical applications.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-pmem-ratio</name>
-    <value>2.1</value>
-    <description>Ratio of virtual memory to physical memory when
-      setting memory limits for containers. Container allocations are
-      expressed in terms of physical memory, and virtual memory usage
-      is allowed to exceed this allocation by this ratio.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-    <description>Auxiliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and
-      cannot start with numbers</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value>/hadoop/yarn/log</value>
-    <description>
-      Where to store container logs. An application's localized log directory
-      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
-      Individual containers' log directories will be below this, in directories
-      named container_${contid}. Each container directory will contain the files
-      stderr, stdin, and syslog generated by that container.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value>/hadoop/yarn/local</value>
-    <description>
-      List of directories to store localized files in. An
-      application's localized file directory will be found in:
-      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
-      Individual containers' work directories, called container_${contid}, will
-      be subdirectories of this.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-monitor.interval-ms</name>
-    <value>3000</value>
-    <description>
-      The interval, in milliseconds, for which the node manager
-      waits between two cycles of monitoring its containers' memory usage.
-    </description>
-  </property>
-
-  <!--
-  <property>
-    <name>yarn.nodemanager.health-checker.script.path</name>
-    <value>/etc/hadoop/conf/health_check_nodemanager</value>
-    <description>The health check script to run.</description>
-  </property>
-   -->
-
-  <property>
-    <name>yarn.nodemanager.health-checker.interval-ms</name>
-    <value>135000</value>
-    <description>Frequency of running node health script.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
-    <value>60000</value>
-    <description>Script time out period.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log.retain-second</name>
-    <value>604800</value>
-    <description>
-      Time in seconds to retain user logs. Only applicable if
-      log aggregation is disabled.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-    <description>Whether to enable log aggregation</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/app-logs</value>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
-    <value>logs</value>
-    <description>
-      The remote log dir will be created at
-      ${yarn.nodemanager.remote-app-log-dir}/${user}/${thisParam}.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-aggregation.compression-type</name>
-    <value>gz</value>
-    <description>
-      T-file compression types used to compress aggregated logs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.delete.debug-delay-sec</name>
-    <value>0</value>
-    <description>
-      Number of seconds after an application finishes before the nodemanager's
-      DeletionService will delete the application's localized file directory
-      and log directory.
-
-      To diagnose Yarn application problems, set this property's value large
-      enough (for example, to 600 = 10 minutes) to permit examination of these
-      directories. After changing the property's value, you must restart the
-      nodemanager in order for it to have an effect.
-
-      The roots of Yarn applications' work directories are configurable with
-      the yarn.nodemanager.local-dirs property (see above), and the roots
-      of the Yarn applications' log directories are configurable with the
-      yarn.nodemanager.log-dirs property (see also above).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation.retain-seconds</name>
-    <value>2592000</value>
-    <description>
-      How long to keep aggregation logs before deleting them. -1 disables this.
-      Be careful: set this too small and you will spam the name node.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.admin-env</name>
-    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
-    <description>
-      Environment variables that should be forwarded from the NodeManager's
-      environment to the container's.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
-    <value>0.25</value>
-    <description>
-      The minimum fraction of disks that must be healthy for the nodemanager
-      to launch new containers. This corresponds to both
-      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs; i.e.,
-      if fewer healthy local-dirs (or log-dirs) are available,
-      then new containers will not be launched on this node.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It's a global
-      setting for all application masters. Each application master can specify
-      its individual maximum number of application attempts via the API, but the
-      individual number cannot be more than the global upper bound. If it is,
-      the resourcemanager will override it. The default number is set to 2, to
-      allow at least one retry for the AM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.webapp.address</name>
-    <value>localhost:8088</value>
-    <description>
-      The address of the RM web application.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-check-enabled</name>
-    <value>false</value>
-    <description>
-      Whether virtual memory limits will be enforced for containers.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://localhost:19888/jobhistory/logs</value>
-    <description>
-      URI for the HistoryServer's log resource
-    </description>
-  </property>
-
-</configuration>
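
A worked example of how the scheduler limits above interact: a container ask
is raised to yarn.scheduler.minimum-allocation-mb, typically rounded up in
increments of it, and capped at yarn.scheduler.maximum-allocation-mb. A Java
sketch of that behavior (the increment-based rounding is an assumption about
the common resource calculator, not the actual YARN normalization code):

    public final class AllocationNormalizer {

        // Clamp a request to [minMb, maxMb] and round it up to a multiple
        // of minMb, mirroring the behavior the descriptions above document.
        static int normalize(int requestedMb, int minMb, int maxMb) {
            int clamped = Math.max(requestedMb, minMb);
            int rounded = ((clamped + minMb - 1) / minMb) * minMb;
            return Math.min(rounded, maxMb);
        }

        public static void main(String[] args) {
            // With minimum 512 and maximum 2048 as configured above:
            System.out.println(normalize(600, 512, 2048));  // 1024
            System.out.println(normalize(4000, 512, 2048)); // 2048, capped
        }
    }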

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/metainfo.xml
deleted file mode 100644
index 8187329..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/metainfo.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-    <version>2.1.0.2.0.6.0</version>
-    <components>
-        <component>
-            <name>RESOURCEMANAGER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>NODEMANAGER</name>
-            <category>SLAVE</category>
-        </component>
-       <component>
-            <name>YARN_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>core-site</config-type>
-      <config-type>yarn-site</config-type>
-      <config-type>capacity-scheduler</config-type>
-    </configuration-dependencies>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/ZOOKEEPER/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/ZOOKEEPER/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/ZOOKEEPER/configuration/global.xml
deleted file mode 100644
index f78df89..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/ZOOKEEPER/configuration/global.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>zk_user</name>
-    <value>zookeeper</value>
-    <description>ZooKeeper User.</description>
-  </property>
-  <property>
-    <name>zookeeperserver_host</name>
-    <value></value>
-    <description>ZooKeeper Server Hosts.</description>
-  </property>
-  <property>
-    <name>zk_data_dir</name>
-    <value>/hadoop/zookeeper</value>
-    <description>Data directory for ZooKeeper.</description>
-  </property>
-  <property>
-    <name>zk_log_dir</name>
-    <value>/var/log/zookeeper</value>
-    <description>ZooKeeper Log Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_dir</name>
-    <value>/var/run/zookeeper</value>
-    <description>ZooKeeper Pid Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_file</name>
-    <value>/var/run/zookeeper/zookeeper_server.pid</value>
-    <description>ZooKeeper Pid File</description>
-  </property>
-  <property>
-    <name>tickTime</name>
-    <value>2000</value>
-    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-  </property>
-  <property>
-    <name>initLimit</name>
-    <value>10</value>
-    <description>Ticks to allow for sync at Init.</description>
-  </property>
-  <property>
-    <name>syncLimit</name>
-    <value>5</value>
-    <description>Ticks to allow for sync at Runtime.</description>
-  </property>
-  <property>
-    <name>clientPort</name>
-    <value>2181</value>
-    <description>Port for running ZK Server.</description>
-  </property>
-
-</configuration>
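
Worth spelling out for the values above: initLimit and syncLimit are counted
in ticks, and one tick is tickTime milliseconds, so this file allows 20
seconds for initial follower sync and 10 seconds of runtime lag. The
arithmetic, as a small Java sketch:

    public final class ZkTimings {

        public static void main(String[] args) {
            int tickTimeMs = 2000; // tickTime above
            int initLimit = 10;    // ticks a follower may take to sync at startup
            int syncLimit = 5;     // ticks a follower may lag at runtime
            System.out.println("init window: " + tickTimeMs * initLimit + " ms"); // 20000 ms
            System.out.println("sync window: " + tickTimeMs * syncLimit + " ms"); // 10000 ms
        }
    }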

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 9c7757a..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
-    <version>3.4.5.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>


[02/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

Posted by yu...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 51b01bb..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    user mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    example, "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>
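
Every description above repeats the same ACL grammar: a comma-separated user
list and a comma-separated group list separated by a single blank, with "*"
meaning everyone. A minimal Java sketch of a parser for that grammar
(illustrative, not Hadoop's AccessControlList):

    import java.util.Arrays;
    import java.util.List;

    public final class ServiceAcl {

        final boolean allowAll;     // "*" grants access to everyone
        final List<String> users;   // comma-separated, before the blank
        final List<String> groups;  // comma-separated, after the blank

        ServiceAcl(String spec) {
            allowAll = spec.trim().equals("*");
            String[] parts = spec.split(" ", 2);
            users = allowAll || parts[0].isEmpty()
                    ? List.of() : Arrays.asList(parts[0].split(","));
            groups = parts.length > 1 && !parts[1].isEmpty()
                    ? Arrays.asList(parts[1].split(",")) : List.of();
        }

        public static void main(String[] args) {
            ServiceAcl acl = new ServiceAcl("alice,bob users,wheel");
            System.out.println(acl.users);  // [alice, bob]
            System.out.println(acl.groups); // [users, wheel]
            // A leading blank means "no users, only groups":
            System.out.println(new ServiceAcl(" hadoop").groups); // [hadoop]
        }
    }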

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index d1f271e..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,484 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <!-- file system properties -->
-
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>Whether to enable WebHDFS feature</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-  </property>
-
-  <!--
-    <property>
-      <name>dfs.hosts</name>
-      <value>/etc/hadoop/conf/dfs.include</value>
-      <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-    </property>
-  -->
-
-  <property>
-    <name>dfs.namenode.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>${dfs.namenode.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      The default value is the same as dfs.namenode.checkpoint.dir.
-    </description>
-  </property>
-
-
-  <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>67108864</value>
-    <description>The size of the current edit log (in bytes) that triggers
-      a periodic checkpoint even if the maximum checkpoint delay is not reached
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>1.0f</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.namenode.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for the balancing purpose in terms of
-      the number of bytes per second.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-  </property>
-
-  <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.http-address</name>
-    <value>localhost:50070</value>
-    <description>The address and port on which the NameNode web UI
-      listens.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>1024</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow the queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.namenode.secondary.http-address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.https-address</name>
-    <value>localhost:50470</value>
-    <description>The https address where the namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be set on dfs.datanode.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.datanode.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL for who can view the default servlets in HDFS</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes is greater
-      than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>A datanode is considered stale after no heartbeat has been received for this interval, in ms</description>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.http-address</name>
-    <value>0.0.0.0:8480</value>
-    <description>The address and port the JournalNode web UI listens on.
-      If the port is 0 then the server will start on a free port. </description>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.edits.dir</name>
-    <value>/grid/0/hdfs/journal</value>
-    <description>The path where the JournalNode daemon will store its local state. </description>
-  </property>
-
-  <!-- HDFS Short-Circuit Local Reads -->
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>
-      This configuration parameter turns on short-circuit local reads.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check</description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
-    <value>4096</value>
-    <description>
-      The DFSClient maintains a cache of recently opened file descriptors. This
-      parameter controls the size of that cache. Setting this higher will use
-      more file descriptors, but potentially provide better performance on
-      workloads involving lots of seeks.
-    </description>
-  </property>
-
-</configuration>
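
To make the staleness settings above concrete: a datanode counts as stale
once no heartbeat has arrived within dfs.namenode.stale.datanode.interval
(30000 ms here), after which the avoid-read/avoid-write flags steer traffic
away from it. A Java sketch of the check (illustrative, not the NameNode's
actual bookkeeping):

    public final class StaleCheck {

        // A node is stale once its last heartbeat is older than the configured
        // interval (dfs.namenode.stale.datanode.interval, 30000 ms above).
        static boolean isStale(long lastHeartbeatMs, long nowMs, long staleIntervalMs) {
            return nowMs - lastHeartbeatMs > staleIntervalMs;
        }

        public static void main(String[] args) {
            long now = System.currentTimeMillis();
            System.out.println(isStale(now - 45_000, now, 30_000)); // true: past interval
            System.out.println(isStale(now - 10_000, now, 30_000)); // false
        }
    }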

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/metainfo.xml
deleted file mode 100644
index 19ac76b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>2.1.0.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-        
-        <component>
-            <name>JOURNALNODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-          <name>ZKFC</name>
-          <category>SLAVE</category>
-        </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-      <config-type>global</config-type>
-      <config-type>hdfs-site</config-type>
-      <config-type>hadoop-policy</config-type>
-    </configuration-dependencies>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 2a3f5d4..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,260 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc</value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value> </value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-      Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-      thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-      string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>Controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best-effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>The Hive client authorization manager class name.
-      The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.</description>
-  </property>
-
-  <property>
-    <name>hive.security.metastore.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
-    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization that converts a common
-      join into a mapjoin based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
-      the criteria for sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization that converts a common join into a mapjoin based on the input
-      file size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is
-      smaller than the specified size, the join is directly converted to a mapjoin (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join
-      is directly converted to a mapjoin (there is no conditional task). The default is 10MB.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
-    <description>Reduce deduplication merges two RSs by moving the key/parts/reducer-num of the child RS to the parent RS.
-      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can produce a very slow, single-reducer MR job.
-      The optimization will be disabled if the number of reducers is less than the specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and there are map-join jobs followed by a map-reduce
-      job (e.g. a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      How many values per key of the map-joined table should be cached in memory.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.execution.enabled</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.index.filter</name>
-    <value>true</value>
-    <description>
-      Whether to enable automatic use of indexes
-    </description>
-  </property>
-
-</configuration>
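
A note on two of the values above. The javax.jdo.option.ConnectionURL value
of "jdbc" is a stub meant to be overwritten at deploy time; a typical
MySQL-backed metastore URL (hostname and database name here are illustrative)
would look like:

    <property>
      <name>javax.jdo.option.ConnectionURL</name>
      <value>jdbc:mysql://db.example.com/hive?createDatabaseIfNotExist=true</value>
    </property>

And to make the hive.auto.convert.join.noconditionaltask.size arithmetic
concrete: with the value 1000000000 (roughly 1 GB) above, a three-way join
whose two smaller inputs together total under 1 GB is rewritten directly as a
mapjoin, with no conditional task.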

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/metainfo.xml
deleted file mode 100644
index ca91fc2..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.12.0.2.0.6.0</version>
-
-    <components>        
-        <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>hive-site</config-type>
-    </configuration-dependencies>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/container-executor.cfg
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/container-executor.cfg b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/container-executor.cfg
deleted file mode 100644
index 502ddaa..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/container-executor.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
-yarn.nodemanager.linux-container-executor.group=hadoop
-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
-banned.users=hdfs,bin,0
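
The TODO-* tokens above are placeholders that are substituted when the file is
rendered on a host; a filled-in copy (directories illustrative) would read:

    yarn.nodemanager.local-dirs=/hadoop/yarn/local
    yarn.nodemanager.linux-container-executor.group=hadoop
    yarn.nodemanager.log-dirs=/var/log/hadoop/yarn
    banned.users=hdfs,bin,0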

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/global.xml
deleted file mode 100644
index ceedd56..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/global.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hs_host</name>
-    <value></value>
-    <description>History Server host.</description>
-  </property>
-  <property>
-    <name>mapred_log_dir_prefix</name>
-    <value>/var/log/hadoop-mapreduce</value>
-    <description>Mapreduce Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_pid_dir_prefix</name>
-    <value>/var/run/hadoop-mapreduce</value>
-    <description>Mapreduce PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>Mapreduce User</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
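
The "*" values above use the standard Hadoop queue ACL syntax: "*" grants the
operation to everyone, an ACL of the form "user1,user2 group1,group2"
(illustrative names) limits it to the listed users plus members of the listed
groups, and a value with a single leading space followed only by group names
grants it to those groups alone.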

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
deleted file mode 100644
index de11867..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/configuration/mapred-site.xml
+++ /dev/null
@@ -1,379 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- i/o properties -->
-
-  <property>
-    <name>mapreduce.task.io.sort.mb</name>
-    <value>200</value>
-    <description>
-      The total amount of buffer memory to use while sorting files, in megabytes.
-      By default, gives each merge stream 1MB, which should minimize seeks.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.sort.spill.percent</name>
-    <value>0.7</value>
-    <description>
-      The soft limit in the serialization buffer. Once reached, a thread will
-      begin to spill the contents to disk in the background. Note that
-      collection will not block if this threshold is exceeded while a spill
-      is already in progress, so spills may be larger than this threshold when
-      it is set to less than .5
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.task.io.sort.factor</name>
-    <value>100</value>
-    <description>
-      The number of streams to merge at once while sorting files.
-      This determines the number of open file handles.
-    </description>
-  </property>
-
-  <!-- map/reduce properties -->
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-    <description>
-      Administrators for MapReduce applications.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.parallelcopies</name>
-    <value>30</value>
-    <description>
-      The default number of parallel transfers run by reduce during
-      the copy(shuffle) phase.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some map tasks
-      may be executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some reduce tasks may be
-      executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
-    <value>0.05</value>
-    <description>
-      Fraction of the number of maps in the job which should be complete before
-      reduces are scheduled for the job.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>
-      The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapreduce.reduce.shuffle.input.buffer.percent.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>
-      The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.output.compress.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed?
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress.type</name>
-    <value>BLOCK</value>
-    <description>
-      If the job outputs are to be compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>
-      The percentage of memory, relative to the maximum heap size, to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-  </property>
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapreduce.map.output.compress</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>mapreduce.task.timeout</name>
-    <value>300000</value>
-    <description>
-      The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Reduce task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The keytab for the job history server principal.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.shuffle.port</name>
-    <value>13562</value>
-    <description>
-      Default port that the ShuffleHandler will run on.
-      ShuffleHandler is a service run at the NodeManager to facilitate
-      transfers of intermediate Map outputs to requesting Reducers.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.address</name>
-    <value>localhost:10020</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>localhost:19888</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.framework.name</name>
-    <value>yarn</value>
-    <description>
-      The runtime framework for executing MapReduce jobs. Can be one of local,
-      classic or yarn.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>/user</value>
-    <description>
-      The staging dir used while submitting jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.resource.mb</name>
-    <value>512</value>
-    <description>The amount of memory the MR AppMaster needs.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.command-opts</name>
-    <value>-Xmx312m</value>
-    <description>
-      Java opts for the MR App Master processes.
-      The following symbol, if present, will be interpolated: @taskid@ is replaced
-      by current TaskID. Any other occurrences of '@' will go unchanged.
-      For example, to enable verbose gc logging to a file named for the taskid in
-      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>
-      Java opts for the MR App Master processes for admin purposes.
-      It will appear before the opts set by yarn.app.mapreduce.am.command-opts and
-      thus its options can be overridden by the user.
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.log.level</name>
-    <value>INFO</value>
-    <description>MR App Master process log level.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.env</name>
-    <value></value>
-    <description>
-      User added environment variables for the MR App Master
-      processes. Examples:
-      1) A=foo  This will set the env variable A to foo
-      2) B=$B:c This will inherit the tasktracker's B env variable.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-  </property>
-
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It is an
-      application-specific setting. It should not be larger than the global number
-      set by the resourcemanager; otherwise, it will be overridden. The default is
-      set to 2, to allow at least one retry for the AM.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapreduce.map.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of maps.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of reduces.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the map task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the reduce task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
-    <description>
-      Additional execution environment entries for map and reduce task processes.
-      This is not an additive property. You must preserve the original value if
-      you want your map and reduce tasks to have access to native libraries (compression, etc)
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress</name>
-    <value>false</value>
-    <description>
-      Should the job outputs be compressed?
-    </description>
-  </property>
-
-</configuration>
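
Two of the values above are worth unpacking. The backtick expression embedded
in mapreduce.admin.user.env selects the native-library subdirectory by probing
the JVM's data model; written out as a standalone shell sketch, it is
equivalent to:

    if $JAVA_HOME/bin/java -d32 -version &> /dev/null; then
      echo Linux-i386-32    # a 32-bit JVM is available
    else
      echo Linux-amd64-64   # assume a 64-bit JVM
    fi

The heap settings also follow the usual container-sizing rule of thumb: each
-Xmx is kept well below its container size (for example, -Xmx756m inside the
1024 MB mapreduce.map.memory.mb container, and -Xmx312m inside the 512 MB
yarn.app.mapreduce.am.resource.mb container) to leave headroom for JVM
overhead.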

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/metainfo.xml
deleted file mode 100644
index 069873a..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/MAPREDUCE2/metainfo.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop NextGen MapReduce (client libraries)</comment>
-    <version>2.1.0.2.0.6.0</version>
-    <components>
-        <component>
-            <name>HISTORYSERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MAPREDUCE2_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-      <config-type>global</config-type>
-      <config-type>mapred-site</config-type>
-      <config-type>mapred-queue-acls</config-type>
-    </configuration-dependencies>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 76471cf..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.5.0</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 28529ff..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,313 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration>
-
-  <!--
-      Refer to the oozie-default.xml file for the complete list of
-      Oozie configuration properties and their default values.
-  -->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-  </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-      The Oozie system ID.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.systemmode</name>
-    <value>NORMAL</value>
-    <description>
-      System mode for Oozie at startup.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.AuthorizationService.security.enabled</name>
-    <value>true</value>
-    <description>
-      Specifies whether security (user name/admin role) is enabled or not.
-      If disabled any user can manage Oozie system and manage any job.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.older.than</name>
-    <value>30</value>
-    <description>
-      Jobs older than this value, in days, will be purged by the PurgeService.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.purge.interval</name>
-    <value>3600</value>
-    <description>
-      Interval at which the purge service will run, in seconds.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.queue.size</name>
-    <value>1000</value>
-    <description>Max callable queue size</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.threads</name>
-    <value>10</value>
-    <description>Number of threads used for executing callables</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.callable.concurrency</name>
-    <value>3</value>
-    <description>
-      Maximum concurrency for a given callable type.
-      Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
-      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-      All commands that use action executors (action-start, action-end, action-kill and action-check) use
-      the action type as the callable type.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.coord.normal.default.timeout</name>
-    <value>120</value>
-    <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means infinite timeout.</description>
-  </property>
-
-  <property>
-    <name>oozie.db.schema.name</name>
-    <value>oozie</value>
-    <description>
-      Oozie database name.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-    <value> </value>
-    <description>
-      Whitelisted job tracker for Oozie service.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.authentication.type</name>
-    <value>simple</value>
-    <description>
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-    <value> </value>
-    <description>
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.WorkflowAppService.system.libpath</name>
-    <value>/user/${user.name}/share/lib</value>
-    <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-    </description>
-  </property>
-
-  <property>
-    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-    <value>false</value>
-    <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, thus not requiring users to specify
-      where the Pig JAR files are; the ones from the system library path are
-      used instead.
-    </description>
-  </property>
-  <property>
-    <name>oozie.authentication.kerberos.name.rules</name>
-    <value>
-      RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-      RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-      RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-      RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-      DEFAULT
-    </value>
-    <description>The mapping from kerberos principal names to local OS user names.</description>
-  </property>
-  <property>
-    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-    <value>*=/etc/hadoop/conf</value>
-    <description>
-      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up
-      within the Oozie configuration directory; the path can also be absolute (i.e.
-      pointing to Hadoop client conf/ directories in the local filesystem).
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.ActionService.executor.ext.classes</name>
-    <value>
-      org.apache.oozie.action.email.EmailActionExecutor,
-      org.apache.oozie.action.hadoop.HiveActionExecutor,
-      org.apache.oozie.action.hadoop.ShellActionExecutor,
-      org.apache.oozie.action.hadoop.SqoopActionExecutor,
-      org.apache.oozie.action.hadoop.DistcpActionExecutor
-    </value>
-  </property>
-
-  <property>
-    <name>oozie.service.SchemaService.wf.ext.schemas</name>
-    <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd</value>
-  </property>
-  <property>
-    <name>oozie.service.JPAService.create.db.schema</name>
-    <value>false</value>
-    <description>
-      Creates Oozie DB.
-
-      If set to true, it creates the DB schema if it does not exist; if the DB schema exists, it is a no-op.
-      If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.driver</name>
-    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-    <description>
-      JDBC driver class.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.url</name>
-    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-    <description>
-      JDBC URL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.username</name>
-    <value>oozie</value>
-    <description>
-      Database user name to use to connect to the database
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.password</name>
-    <value> </value>
-    <description>
-      DB user password.
-
-      IMPORTANT: if the password is empty, leave a 1-space string; the service trims the value,
-      and if it is empty, Configuration assumes it is NULL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.pool.max.active.conn</name>
-    <value>10</value>
-    <description>
-      Max number of connections.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.services</name>
-    <value>
-      org.apache.oozie.service.SchedulerService,
-      org.apache.oozie.service.InstrumentationService,
-      org.apache.oozie.service.CallableQueueService,
-      org.apache.oozie.service.UUIDService,
-      org.apache.oozie.service.ELService,
-      org.apache.oozie.service.AuthorizationService,
-      org.apache.oozie.service.UserGroupInformationService,
-      org.apache.oozie.service.HadoopAccessorService,
-      org.apache.oozie.service.URIHandlerService,
-      org.apache.oozie.service.MemoryLocksService,
-      org.apache.oozie.service.DagXLogInfoService,
-      org.apache.oozie.service.SchemaService,
-      org.apache.oozie.service.LiteWorkflowAppService,
-      org.apache.oozie.service.JPAService,
-      org.apache.oozie.service.StoreService,
-      org.apache.oozie.service.CoordinatorStoreService,
-      org.apache.oozie.service.SLAStoreService,
-      org.apache.oozie.service.DBLiteWorkflowStoreService,
-      org.apache.oozie.service.CallbackService,
-      org.apache.oozie.service.ActionService,
-      org.apache.oozie.service.ActionCheckerService,
-      org.apache.oozie.service.RecoveryService,
-      org.apache.oozie.service.PurgeService,
-      org.apache.oozie.service.CoordinatorEngineService,
-      org.apache.oozie.service.BundleEngineService,
-      org.apache.oozie.service.DagEngineService,
-      org.apache.oozie.service.CoordMaterializeTriggerService,
-      org.apache.oozie.service.StatusTransitService,
-      org.apache.oozie.service.PauseTransitService,
-      org.apache.oozie.service.GroupsService,
-      org.apache.oozie.service.ProxyUserService
-    </value>
-    <description>List of Oozie services</description>
-  </property>
-  <property>
-    <name>oozie.service.URIHandlerService.uri.handlers</name>
-    <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
-    <description>
-      List the different URI handlers supported for data availability checks.
-    </description>
-  </property>
-  <property>
-    <name>oozie.services.ext</name>
-    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService</value>
-    <description>
-      To add/replace services defined in 'oozie.services' with custom implementations.
-      Class names must be separated by commas.
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.coord.push.check.requeue.interval</name>
-    <value>30000</value>
-    <description>
-      Command re-queue interval for push dependencies (in millisecond).
-    </description>
-  </property>
-  <property>
-    <name>oozie.credentials.credentialclasses</name>
-    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
-    <description>
-      Credential Class to be used for HCat.
-    </description>
-  </property>
-
-</configuration>
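
To illustrate the auth-to-local syntax used by
oozie.authentication.kerberos.name.rules above once its TODO-* placeholders
are filled in: a rule such as

    RULE:[2:$1@$0](jt@.*EXAMPLE.COM)s/.*/mapred/

(realm and user illustrative) takes a two-component principal like
jt/host01@EXAMPLE.COM, formats it as jt@EXAMPLE.COM, matches it against the
parenthesized pattern, and maps it to the local user mapred; the trailing
DEFAULT rule handles any principal the explicit rules do not match.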

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/metainfo.xml
deleted file mode 100644
index 515e669..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.</comment>
-    <version>4.0.0.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>oozie-site</config-type>
-    </configuration-dependencies>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false
-
-pig.disable.counter=false
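
The two reducer-estimation keys above combine as
min(ceil(input_bytes / pig.exec.reducers.bytes.per.reducer),
pig.exec.reducers.max), so with the values shown a 10 GB input is assigned 10
reducers and anything beyond roughly 999 GB is capped at 999.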

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/metainfo.xml
deleted file mode 100644
index 44e9cda..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.12.0.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/SQOOP/metainfo.xml
deleted file mode 100644
index 9a50700..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.4.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 775632f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,129 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-
-
-
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma-separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-    <name>templeton.override.enabled</name>
-    <value>false</value>
-    <description>
-      Enable the override path in templeton.override.jars
-    </description>
-  </property>
-
-  <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API.</description>
-  </property>
-
-</configuration>
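
Since webhcat-site.xml pins the server to templeton.port 50111, a quick liveness probe against the stock WebHCat status resource (/templeton/v1/status) looks like the sketch below; the host name is an assumption:

    # webhcat_ping.py -- probe a WebHCat (Templeton) server on its default port
    import json
    import urllib.request

    WEBHCAT_HOST = "localhost"  # assumption: server runs locally
    WEBHCAT_PORT = 50111        # matches templeton.port above

    def webhcat_status():
        url = f"http://{WEBHCAT_HOST}:{WEBHCAT_PORT}/templeton/v1/status"
        with urllib.request.urlopen(url, timeout=10) as resp:
            return json.load(resp)  # a healthy server answers {"status": "ok", ...}

    if __name__ == "__main__":
        print(webhcat_status())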

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index 91267e4..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the WEBHCAT service</comment>
-    <version>0.12.0.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>
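
The net effect of this series is that HDPLocal disappears from Ambari's stack catalog. A hedged sketch of how one could confirm that against a running server through the public /api/v1/stacks resource — host, port, credentials and response shape are assumptions based on Ambari's defaults:

    # list_stacks.py -- list the stack names an Ambari server advertises
    import base64
    import json
    import urllib.request

    AMBARI = "http://localhost:8080"                   # assumption: default address
    AUTH = base64.b64encode(b"admin:admin").decode()   # assumption: default creds

    def stack_names():
        req = urllib.request.Request(
            AMBARI + "/api/v1/stacks",
            headers={"Authorization": "Basic " + AUTH},
        )
        with urllib.request.urlopen(req, timeout=10) as resp:
            data = json.load(resp)
        return [item["Stacks"]["stack_name"] for item in data["items"]]

    if __name__ == "__main__":
        print(stack_names())  # after this commit: no "HDPLocal" entry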


[05/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

Posted by yu...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index 56eeff5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,573 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- i/o properties -->
-
-  <property>
-    <name>io.sort.mb</name>
-    <value>200</value>
-    <description>
-      The total amount of Map-side buffer memory to use while sorting files
-    </description>
-  </property>
-
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value>0.9</value>
-    <description>Percentage of sort buffer used for record collection</description>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>No description</description>
-  </property>
-
-  <!-- map/reduce properties -->
-
-  <property>
-    <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-    <value>250</value>
-    <description>Normally, this is the amount of time before killing
-      processes; the recommended default is 5 seconds (a value of
-      5000 here).  In this case, we use it solely to blast tasks before
-      killing them, and we kill them very quickly (1/4 second) to guarantee
-      that we do not leave VMs around for later jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>50</value>
-    <description>
-      The number of server threads for the JobTracker. This should be roughly
-      4% of the number of tasktracker nodes.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value>/mapred/system</value>
-    <description>Path on HDFS where the MapReduce framework stores system files</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <!-- cluster variant -->
-    <value>localhost:50300</value>
-    <description>JobTracker address</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.http.address</name>
-    <!-- cluster variant -->
-    <value>localhost:50030</value>
-    <description>JobTracker host and http port address</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <!-- cluster specific -->
-    <name>mapred.local.dir</name>
-    <value>/hadoop/mapred</value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-  </property>
-
-  <property>
-    <name>mapred.reduce.parallel.copies</name>
-    <value>30</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value>4</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value>2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-  </property>
-
-  <property>
-    <name>mapred.map.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-      may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-      may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.slowstart.completed.maps</name>
-    <value>0.05</value>
-  </property>
-
-  <property>
-    <name>mapred.inmem.merge.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files,
-      for the in-memory merge process. When we accumulate the threshold number
-      of files, we initiate the in-memory merge and spill to disk. A value of 0
-      or less indicates that no threshold applies and that only the ramfs's
-      memory consumption triggers the merge.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapred.job.shuffle.input.buffer.percent.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.map.output.compression.codec</name>
-    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.output.compression.type</name>
-    <value>BLOCK</value>
-    <description>If the job outputs are to be compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.jobtracker.completeuserjobs.maximum</name>
-    <value>5</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.restart.recover</name>
-    <value>false</value>
-    <description>"true" to enable (job) recovery upon restart,
-      "false" to start afresh
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>The percentage of memory- relative to the maximum heap size- to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.input.limit</name>
-    <value>10737418240</value>
-    <description>The limit on the input size of the reduce. (This value
-      is 10 GB.)  If the estimated input size of the reduce is greater than
-      this value, the job is failed. A value of -1 means that no limit
-      is set. </description>
-  </property>
-
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
-
-  <property>
-    <name>mapred.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-  </property>
-
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-    <description>
-      TaskController which is used to launch and manage task execution.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value></value>
-
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.map.memory.mb</name>
-    <value>1536</value>
-    <description>
-      The virtual memory size of a single Map slot in the MapReduce framework
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.reduce.memory.mb</name>
-    <value>2048</value>
-    <description>
-      The virtual memory size of a single Reduce slot in the MapReduce framework
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.map.memory.mb</name>
-    <value>1536</value>
-    <description>
-      Virtual memory for single Map task
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.memory.mb</name>
-    <value>2048</value>
-    <description>
-      Virtual memory for single Reduce task
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.map.memory.mb</name>
-    <value>6144</value>
-    <description>
-      Upper limit on virtual memory size for a single Map task of any MapReduce job
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.reduce.memory.mb</name>
-    <value>4096</value>
-    <description>
-      Upper limit on virtual memory size for a single Reduce task of any MapReduce job
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.hosts</name>
-    <value>/etc/hadoop/conf/mapred.include</value>
-    <description>
-      Names a file that contains the list of nodes that may
-      connect to the jobtracker.  If the value is empty, all hosts are
-      permitted.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.hosts.exclude</name>
-    <value>/etc/hadoop/conf/mapred.exclude</value>
-    <description>
-      Names a file that contains the list of hosts that
-      should be excluded by the jobtracker.  If the value is empty, no
-      hosts are excluded.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.max.tracker.blacklists</name>
-    <value>16</value>
-    <description>
-      If a node is reported as blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.script.path</name>
-    <value>file:////mapred/jobstatus</value>
-    <description>
-      Directory path to view job status
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.interval</name>
-    <value>135000</value>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.script.timeout</name>
-    <value>60000</value>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.active</name>
-    <value>false</value>
-    <description>Indicates if persistency of job status information is
-      active or not.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.hours</name>
-    <value>1</value>
-    <description>The number of hours job status information is persisted in DFS.
-      The job status information will be available after it drops out of the memory
-      queue and between jobtracker restarts. With a zero value, the job status
-      information is not persisted at all in DFS.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.dir</name>
-    <value>/etc/hadoop/conf/health_check</value>
-    <description>The directory where the job status information is persisted
-      in a file system to be available after it drops out of the memory queue and
-      between jobtracker restarts.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.retirejob.check</name>
-    <value>10000</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.retirejob.interval</name>
-    <value>21600000</value>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.history.completed.location</name>
-    <value>/mapred/history/done</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.maxvmem</name>
-    <value></value>
-    <final>true</final>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.maxtasks.per.job</name>
-    <value>-1</value>
-    <final>true</final>
-    <description>The maximum number of tasks for a single job.
-      A value of -1 indicates that there is no maximum.  </description>
-  </property>
-
-  <property>
-    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>mapred.userlog.retain.hours</name>
-    <value>24</value>
-    <description>
-      The maximum time, in hours, for which the user-logs are to be retained after the job completion.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reuse.jvm.num.tasks</name>
-    <value>1</value>
-    <description>
-      How many tasks to run per JVM. If set to -1, there is no limit.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.kerberos.principal</name>
-    <value></value>
-    <description>
-      JT user name key.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.kerberos.principal</name>
-    <value></value>
-    <description>
-      TaskTracker user name key. "_HOST" is replaced by the host name of the task tracker.
-    </description>
-  </property>
-
-
-  <property>
-    <name>hadoop.job.history.user.location</name>
-    <value>none</value>
-    <final>true</final>
-  </property>
-
-
-  <property>
-    <name>mapreduce.jobtracker.keytab.file</name>
-    <value></value>
-    <description>
-      The keytab for the jobtracker principal.
-    </description>
-
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.keytab.file</name>
-    <value></value>
-    <description>The filename of the keytab for the task tracker</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.http.address</name>
-    <value></value>
-    <description>Http address for task tracker.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.staging.root.dir</name>
-    <value>/user</value>
-    <description>The path prefix for where the staging directories should be placed. The next level is always the user's
-      name. It is a path in the default file system.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.group</name>
-    <value>hadoop</value>
-    <description>The group that is allowed to access the task controller. The mapred user must be a member; regular users should *not* be members.</description>
-
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-    <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-      initialization.
-    </description>
-  </property>
-  <property>
-    <name>mapreduce.history.server.embedded</name>
-    <value>false</value>
-    <description>Should the job history server be embedded within the JobTracker
-      process?</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.history.server.http.address</name>
-    <!-- cluster variant -->
-    <value>localhost:51111</value>
-    <description>Http address of the history server</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.kerberos.principal</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Job history user name key. (must map to same user as JT
-      user)</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The keytab for the job history server principal.</description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-    <value>180</value>
-    <description>
-      3-hour sliding window (value is in minutes)
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-    <value>15</value>
-    <description>
-      15-minute bucket size (value is in minutes)
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.queue.names</name>
-    <value>default</value>
-    <description>Comma-separated list of queues configured for this JobTracker.</description>
-  </property>
-
-</configuration>
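
Every *-site.xml in this patch is the same flat structure: repeated <property> elements with a <name>, a <value>, and optionally a <description> and a <final> flag. A small Python sketch that flattens such a file into a dict (the file name is illustrative):

    # read_site_xml.py -- flatten a Hadoop *-site.xml into {name: value}
    import xml.etree.ElementTree as ET

    def read_site_xml(path):
        props = {}
        for prop in ET.parse(path).getroot().iter("property"):
            name = prop.findtext("name")
            props[name] = prop.findtext("value") or ""
        return props

    if __name__ == "__main__":
        conf = read_site_xml("mapred-site.xml")  # illustrative path
        # e.g. inspect the slot memory settings defined above
        print(conf.get("mapred.cluster.map.memory.mb"),
              conf.get("mapred.cluster.reduce.memory.mb"))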

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index b3eb72a..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.2.0.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/NAGIOS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/NAGIOS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/NAGIOS/configuration/global.xml
deleted file mode 100644
index 61a2b90..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/NAGIOS/configuration/global.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <description>Nagios Username.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <description>Nagios Group.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Nagios web user.</description>
-  </property>
-  <property>
-    <name>nagios_web_password</name>
-    <value></value>
-    <description>Nagios Admin Password.</description>
-  </property>
-  <property>
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Hadoop Admin Email.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 76471cf..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.5.0</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/OOZIE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/OOZIE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/OOZIE/configuration/global.xml
deleted file mode 100644
index ddbf780..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/OOZIE/configuration/global.xml
+++ /dev/null
@@ -1,105 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>oozie_user</name>
-    <value>oozie</value>
-    <description>Oozie User.</description>
-  </property>
-  <property>
-    <name>oozieserver_host</name>
-    <value></value>
-    <description>Oozie Server Host.</description>
-  </property>
-  <property>
-    <name>oozie_database</name>
-    <value></value>
-    <description>Oozie Server Database.</description>
-  </property>
-  <property>
-    <name>oozie_derby_database</name>
-    <value>Derby</value>
-    <description>Oozie Derby Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_database</name>
-    <value>MySQL</value>
-    <description>Oozie MySQL Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_host</name>
-    <value></value>
-    <description>Existing MySQL Host.</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_database</name>
-    <value>Oracle</value>
-    <description>Oracle Database</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_host</name>
-    <value></value>
-    <description>Database Host.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database default.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_host</name>
-    <value></value>
-    <description>Host on which the database will be created.</description>
-  </property>
-  <property>
-    <name>oozie_database_name</name>
-    <value>oozie</value>
-    <description>Database name used for the Oozie.</description>
-  </property>
-  <property>
-    <name>oozie_metastore_user_name</name>
-    <value>oozie</value>
-    <description>Database user name to use to connect to the database</description>
-  </property>
-  <property>
-    <name>oozie_metastore_user_passwd</name>
-    <value></value>
-    <description>Database password to use to connect to the database</description>
-  </property>
-  <property>
-    <name>oozie_data_dir</name>
-    <value>/hadoop/oozie/data</value>
-    <description>Data directory in which the Oozie DB exists</description>
-  </property>
-  <property>
-    <name>oozie_log_dir</name>
-    <value>/var/log/oozie</value>
-    <description>Directory for oozie logs</description>
-  </property>
-  <property>
-    <name>oozie_pid_dir</name>
-    <value>/var/run/oozie</value>
-    <description>Directory in which the pid files for oozie reside.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 1f83735..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,237 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration>
-
-  <!--
-      Refer to the oozie-default.xml file for the complete list of
-      Oozie configuration properties and their default values.
-  -->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-  </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-      The Oozie system ID.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.systemmode</name>
-    <value>NORMAL</value>
-    <description>
-      System mode for Oozie at startup.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.AuthorizationService.authorization.enabled</name>
-    <value>true</value>
-    <description>
-      Specifies whether security (user name/admin role) is enabled or not.
-      If disabled, any user can manage the Oozie system and manage any job.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.older.than</name>
-    <value>30</value>
-    <description>
-      Jobs older than this value, in days, will be purged by the PurgeService.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.purge.interval</name>
-    <value>3600</value>
-    <description>
-      Interval at which the purge service will run, in seconds.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.queue.size</name>
-    <value>1000</value>
-    <description>Max callable queue size</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.threads</name>
-    <value>10</value>
-    <description>Number of threads used for executing callables</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.callable.concurrency</name>
-    <value>3</value>
-    <description>
-      Maximum concurrency for a given callable type.
-      Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
-      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-      All commands that use action executors (action-start, action-end, action-kill and action-check) use
-      the action type as the callable type.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.coord.normal.default.timeout</name>
-    <value>120</value>
-    <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      A value of -1 means an infinite timeout.</description>
-  </property>
-
-  <property>
-    <name>oozie.db.schema.name</name>
-    <value>oozie</value>
-    <description>
-      Oozie database name.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-    <value> </value>
-    <description>
-      Whitelisted job tracker for Oozie service.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.authentication.type</name>
-    <value>simple</value>
-    <description>
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-    <value> </value>
-    <description>
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.WorkflowAppService.system.libpath</name>
-    <value>/user/${user.name}/share/lib</value>
-    <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-    </description>
-  </property>
-
-  <property>
-    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-    <value>false</value>
-    <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-    </description>
-  </property>
-  <property>
-    <name>oozie.authentication.kerberos.name.rules</name>
-    <value>DEFAULT</value>
-    <description>The mapping from kerberos principal names to local OS user names.</description>
-  </property>
-  <property>
-    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-    <value>*=/etc/hadoop/conf</value>
-    <description>
-      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
-      the Oozie configuration directory; the path can also be absolute (e.g. to point
-      to Hadoop client conf/ directories in the local filesystem).
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.ActionService.executor.ext.classes</name>
-    <value>org.apache.oozie.action.email.EmailActionExecutor,
-      org.apache.oozie.action.hadoop.HiveActionExecutor,
-      org.apache.oozie.action.hadoop.ShellActionExecutor,
-      org.apache.oozie.action.hadoop.SqoopActionExecutor,
-      org.apache.oozie.action.hadoop.DistcpActionExecutor</value>
-  </property>
-
-  <property>
-    <name>oozie.service.SchemaService.wf.ext.schemas</name>
-    <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-  </property>
-  <property>
-    <name>oozie.service.JPAService.create.db.schema</name>
-    <value>false</value>
-    <description>
-      Creates Oozie DB.
-
-      If set to true, it creates the DB schema if it does not exist; if the DB schema exists, this is a NOP.
-      If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.driver</name>
-    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-    <description>
-      JDBC driver class.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.url</name>
-    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-    <description>
-      JDBC URL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.username</name>
-    <value>oozie</value>
-    <description>
-      DB user name.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.password</name>
-    <value> </value>
-    <description>
-      DB user password.
-
-      IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-      and if it is empty, Configuration assumes it is NULL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.pool.max.active.conn</name>
-    <value>10</value>
-    <description>
-      Max number of connections.
-    </description>
-  </property>
-</configuration>
\ No newline at end of file
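
The JDBC URL above relies on Oozie's ${...} variable expansion: ${oozie.data.dir} and ${oozie.db.schema.name} are resolved from other configuration values before the embedded Derby database is opened. A toy one-pass sketch of that substitution, using the values from the files above (Oozie's real resolution rules are more involved):

    # expand_jdbc_url.py -- toy one-pass expansion of ${var} references
    import re

    conf = {
        "oozie.data.dir": "/hadoop/oozie/data",  # oozie_data_dir in global.xml
        "oozie.db.schema.name": "oozie",
        "oozie.service.JPAService.jdbc.url":
            "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
    }

    def expand(value, conf):
        # unknown variables are left untouched
        return re.sub(r"\$\{([^}]+)\}",
                      lambda m: conf.get(m.group(1), m.group(0)), value)

    if __name__ == "__main__":
        print(expand(conf["oozie.service.JPAService.jdbc.url"], conf))
        # -> jdbc:derby:/hadoop/oozie/data/oozie-db;create=true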

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/OOZIE/metainfo.xml
deleted file mode 100644
index bc33c0b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.</comment>
-    <version>3.3.2.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
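
The two reducer-estimation knobs near the bottom drive a simple heuristic: Pig defaults the reducer count to the total input size divided by pig.exec.reducers.bytes.per.reducer, capped at pig.exec.reducers.max. A sketch of that arithmetic with the values above:

    # estimate_reducers.py -- Pig's default reducer heuristic, roughly
    import math

    BYTES_PER_REDUCER = 1_000_000_000  # pig.exec.reducers.bytes.per.reducer
    MAX_REDUCERS = 999                 # pig.exec.reducers.max

    def estimate_reducers(total_input_bytes):
        wanted = math.ceil(total_input_bytes / BYTES_PER_REDUCER)
        return max(1, min(MAX_REDUCERS, wanted))

    if __name__ == "__main__":
        print(estimate_reducers(250_000_000))     # small input -> 1
        print(estimate_reducers(10_000_000_000))  # 10 GB input -> 10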

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/PIG/metainfo.xml
deleted file mode 100644
index d29d56d..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.11.1.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/SQOOP/metainfo.xml
deleted file mode 100644
index ccf40b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.3.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index cc30c7a..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma-separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-    <name>templeton.override.enabled</name>
-    <value>false</value>
-    <description>
-      Enable the override path in templeton.override.jars
-    </description>
-  </property>
-
-  <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index 10babb1..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the WEBHCAT service</comment>
-    <version>0.11.0.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/ZOOKEEPER/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/ZOOKEEPER/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/ZOOKEEPER/configuration/global.xml
deleted file mode 100644
index f78df89..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/ZOOKEEPER/configuration/global.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>zk_user</name>
-    <value>zookeeper</value>
-    <description>ZooKeeper User.</description>
-  </property>
-  <property>
-    <name>zookeeperserver_host</name>
-    <value></value>
-    <description>ZooKeeper Server Hosts.</description>
-  </property>
-  <property>
-    <name>zk_data_dir</name>
-    <value>/hadoop/zookeeper</value>
-    <description>Data directory for ZooKeeper.</description>
-  </property>
-  <property>
-    <name>zk_log_dir</name>
-    <value>/var/log/zookeeper</value>
-    <description>ZooKeeper Log Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_dir</name>
-    <value>/var/run/zookeeper</value>
-    <description>ZooKeeper Pid Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_file</name>
-    <value>/var/run/zookeeper/zookeeper_server.pid</value>
-    <description>ZooKeeper Pid File</description>
-  </property>
-  <property>
-    <name>tickTime</name>
-    <value>2000</value>
-    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-  </property>
-  <property>
-    <name>initLimit</name>
-    <value>10</value>
-    <description>Number of ticks to allow followers to connect and sync at startup.</description>
-  </property>
-  <property>
-    <name>syncLimit</name>
-    <value>5</value>
-    <description>Number of ticks to allow followers to stay in sync with the leader at runtime.</description>
-  </property>
-  <property>
-    <name>clientPort</name>
-    <value>2181</value>
-    <description>Port for running ZK Server.</description>
-  </property>
-
-</configuration>
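
With the values above, the quorum timing works out as initLimit x tickTime =
10 x 2000 ms = 20 s for a follower to connect and sync at startup, and
syncLimit x tickTime = 5 x 2000 ms = 10 s to stay in sync at runtime. A
minimal client-side smoke test against the configured clientPort, assuming the
standard ZooKeeper Java client (the hostname is a placeholder):

    import org.apache.zookeeper.ZooKeeper;

    public class ZkSmokeTest {
        public static void main(String[] args) throws Exception {
            // clientPort 2181 as configured above; the host is an assumption.
            ZooKeeper zk = new ZooKeeper("zk1.example.com:2181", 30000,
                    event -> System.out.println("event: " + event.getState()));
            Thread.sleep(1000);  // crude wait for the session to establish
            System.out.println("session: 0x" + Long.toHexString(zk.getSessionId()));
            zk.close();
        }
    }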

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 827f870..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
-    <version>3.4.5.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/metainfo.xml
deleted file mode 100644
index 17e8e38..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/metainfo.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-    </versions>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/repos/repoinfo.xml
deleted file mode 100644
index d50bf8c..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/repos/repoinfo.xml
+++ /dev/null
@@ -1,61 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.5.0</baseurl>
-      <repoid>HDP-2.0.5</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.5.0</baseurl>
-      <repoid>HDP-2.0.5</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.5.0</baseurl>
-      <repoid>HDP-2.0.5</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.5.0</baseurl>
-      <repoid>HDP-2.0.5</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.5.0</baseurl>
-      <repoid>HDP-2.0.5</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.5.0</baseurl>
-      <repoid>HDP-2.0.5</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-</reposinfo>
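
At install time Ambari turns each <repo> entry above into a yum/zypper
repository definition on the target hosts; for the centos6 entry the generated
file is roughly of this shape (the exact field set is illustrative):

    [HDP-2.0.5]
    name=HDP
    baseurl=http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.5.0
    enabled=1
    gpgcheck=0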

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 9f7444b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.5.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/global.xml
deleted file mode 100644
index bc8a7d3..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hbasemaster_host</name>
-    <value></value>
-    <description>HBase Master Host.</description>
-  </property>
-  <property>
-    <name>regionserver_hosts</name>
-    <value></value>
-    <description>Region Server Hosts</description>
-  </property>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>PID Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-  <property>
-    <name>hstore_compactionthreshold</name>
-    <value>3</value>
-    <description>HBase HStore compaction threshold.</description>
-  </property>
-  <property>
-    <name>hfile_blockcache_size</name>
-    <value>0.25</value>
-    <description>HFile block cache size.</description>
-  </property>
-  <property>
-    <name>hstorefile_maxsize</name>
-    <value>1073741824</value>
-    <description>Maximum HStoreFile Size</description>
-  </property>
-    <property>
-    <name>regionserver_handlers</name>
-    <value>30</value>
-    <description>HBase RegionServer Handler Count</description>
-  </property>
-    <property>
-    <name>hregion_majorcompaction</name>
-    <value>86400000</value>
-    <description>HBase Major Compaction.</description>
-  </property>
-    <property>
-    <name>hregion_blockmultiplier</name>
-    <value>2</value>
-    <description>HBase Region Block Multiplier</description>
-  </property>
-    <property>
-    <name>hregion_memstoreflushsize</name>
-    <value></value>
-    <description>HBase Region MemStore Flush Size.</description>
-  </property>
-    <property>
-    <name>client_scannercaching</name>
-    <value>100</value>
-    <description>HBase Client Scanner Caching</description>
-  </property>
-    <property>
-    <name>zookeeper_sessiontimeout</name>
-    <value>60000</value>
-    <description>ZooKeeper Session Timeout</description>
-  </property>
-    <property>
-    <name>hfile_max_keyvalue_size</name>
-    <value>10485760</value>
-    <description>HBase Client Maximum key-value Size</description>
-  </property>
-  <property>
-    <name>hbase_hdfs_root_dir</name>
-    <value>/apps/hbase/data</value>
-    <description>HBase Relative Path to HDFS.</description>
-  </property>
-   <property>
-    <name>hbase_conf_dir</name>
-    <value>/etc/hbase</value>
-    <description>Config Directory for HBase.</description>
-  </property>
-   <property>
-    <name>hdfs_enable_shortcircuit_read</name>
-    <value>true</value>
-    <description>HDFS Short Circuit Read</description>
-  </property>
-   <property>
-    <name>hdfs_support_append</name>
-    <value>true</value>
-    <description>HDFS append support</description>
-  </property>
-   <property>
-    <name>hstore_blockingstorefiles</name>
-    <value>7</value>
-    <description>HStore blocking storefiles.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_lab</name>
-    <value>true</value>
-    <description>Enable the Region Server MemStore-Local Allocation Buffer (MSLAB).</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_lowerlimit</name>
-    <value>0.35</value>
-    <description>Region Server memstore lower limit.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_upperlimit</name>
-    <value>0.4</value>
-    <description>Region Server memstore upper limit.</description>
-  </property>
-   <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <description>HBase User Name.</description>
-  </property>
-
-</configuration>
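
Taken together, the heap-related values above imply the following split of the
default 1024 MB RegionServer heap: memstores may grow to about 0.4 x 1024 = 410
MB before updates block, flushing continues down to about 0.35 x 1024 = 358 MB,
and the block cache takes 0.25 x 1024 = 256 MB. A back-of-the-envelope check
(plain arithmetic, no HBase calls):

    public class RegionServerHeapBudget {
        public static void main(String[] args) {
            double heapMb = 1024;      // hbase_regionserver_heapsize
            double upper = 0.4;        // regionserver_memstore_upperlimit
            double lower = 0.35;       // regionserver_memstore_lowerlimit
            double blockCache = 0.25;  // hfile_blockcache_size
            System.out.printf("memstore upper limit: %.0f MB%n", heapMb * upper);
            System.out.printf("memstore lower limit: %.0f MB%n", heapMb * lower);
            System.out.printf("block cache:          %.0f MB%n", heapMb * blockCache);
            // upper + blockCache should stay well below 1.0 to leave room
            // for the rest of the JVM; here it is 0.65.
            System.out.printf("memstore + cache:     %.0f%% of heap%n",
                    (upper + blockCache) * 100);
        }
    }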

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (i.e.,
-    clients talking to HRegionServers).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (i.e.,
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
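
For reference, the "users<blank>groups" grammar described in each property
works out to values like these (names are illustrative):

    *                       any user may connect
    alice,bob               only the users alice and bob
    alice,bob users,wheel   alice and bob, plus members of the users or wheel groups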

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index 9a8d738..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,364 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value></value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration, or else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in a single JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value></value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value></value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value></value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value></value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value></value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value></value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value></value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value></value>
-    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing
-    runaway memstore growth during spikes in update traffic.  Without an
-    upper bound, the memstore fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or,
-    worse, we hit an OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value></value>
-    <description>
-    Memstore will be flushed to disk if the size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value></value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value></value>
-    <description>
-    Maximum HStoreFile size. If any one of a column family's HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value></value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory
-    and some calls of next may take longer and longer times when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>30000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value></value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since entries cannot be split, this helps avoid a region
-    becoming unsplittable because a single entry is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value></value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value></value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value></value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable, though this is not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-  </property>
-
-  <property>
-    <name>hbase.rpc.engine</name>
-    <value>org.apache.hadoop.hbase.ipc.WritableRpcEngine</value>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value></value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>Does HDFS allow appends to files?
-    This is an HDFS setting, included here so the HDFS client will enable append support.
-    You must ensure that this setting is also true server-side when running HBase
-    (you will have to restart your cluster after setting it).
-    </description>
-  </property>
-
-
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase-unsecure</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file paths are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.defaults.for.version.skip</name>
-    <value>true</value>
-    <description>Disables version verification.</description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <description>Path to domain socket.</description>
-  </property>
-
-</configuration>
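
Combining the hbase.rootdir description's own example NameNode with the
/apps/hbase/data root used elsewhere in this stack gives a fully qualified
value of hdfs://namenode.example.org:9000/apps/hbase/data. On the client side,
the ZooKeeper-related properties above are the ones that matter for
connecting; a minimal sketch using the stock HBase configuration API
(hostnames are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HBaseClientBootstrap {
        public static void main(String[] args) {
            // Starts from hbase-default.xml/hbase-site.xml if present on the
            // classpath, then overrides the connection-relevant properties.
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "zk1.example.com,zk2.example.com");
            conf.set("hbase.zookeeper.property.clientPort", "2181");
            conf.set("zookeeper.znode.parent", "/hbase-unsecure");
            System.out.println("quorum = " + conf.get("hbase.zookeeper.quorum"));
        }
    }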

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/metainfo.xml
deleted file mode 100644
index f627b48..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.95.2.2.0.5.0</version>
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>


[14/14] git commit: AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

Posted by yu...@apache.org.
AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)


Project: http://git-wip-us.apache.org/repos/asf/incubator-ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ambari/commit/d3e1eab5
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ambari/tree/d3e1eab5
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ambari/diff/d3e1eab5

Branch: refs/heads/trunk
Commit: d3e1eab5b05579abced82015d6767c6745ca33c3
Parents: 6ccdcc8
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Fri Nov 15 11:12:14 2013 -0800
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Fri Nov 15 11:12:14 2013 -0800

----------------------------------------------------------------------
 .../stacks/HDPLocal/1.2.0/repos/repoinfo.xml    |  87 ---
 .../1.2.0/services/GANGLIA/metainfo.xml         |  40 --
 .../HBASE/configuration/hbase-policy.xml        |  53 --
 .../services/HBASE/configuration/hbase-site.xml | 334 -----------
 .../HDPLocal/1.2.0/services/HBASE/metainfo.xml  |  40 --
 .../1.2.0/services/HCATALOG/metainfo.xml        |  30 -
 .../services/HDFS/configuration/core-site.xml   | 251 --------
 .../HDFS/configuration/hadoop-policy.xml        | 134 -----
 .../services/HDFS/configuration/hdfs-site.xml   | 415 --------------
 .../HDPLocal/1.2.0/services/HDFS/metainfo.xml   |  46 --
 .../services/HIVE/configuration/hive-site.xml   | 138 -----
 .../HDPLocal/1.2.0/services/HIVE/metainfo.xml   |  43 --
 .../configuration/capacity-scheduler.xml        | 195 -------
 .../MAPREDUCE/configuration/core-site.xml       |  20 -
 .../configuration/mapred-queue-acls.xml         |  39 --
 .../MAPREDUCE/configuration/mapred-site.xml     | 531 -----------------
 .../1.2.0/services/MAPREDUCE/metainfo.xml       |  41 --
 .../HDPLocal/1.2.0/services/NAGIOS/metainfo.xml |  30 -
 .../services/OOZIE/configuration/oozie-site.xml | 245 --------
 .../HDPLocal/1.2.0/services/OOZIE/metainfo.xml  |  35 --
 .../services/PIG/configuration/pig.properties   |  52 --
 .../HDPLocal/1.2.0/services/PIG/metainfo.xml    |  30 -
 .../HDPLocal/1.2.0/services/SQOOP/metainfo.xml  |  30 -
 .../WEBHCAT/configuration/webhcat-site.xml      | 126 ----
 .../1.2.0/services/WEBHCAT/metainfo.xml         |  31 -
 .../1.2.0/services/ZOOKEEPER/metainfo.xml       |  35 --
 .../stacks/HDPLocal/1.2.1/repos/repoinfo.xml    |  75 ---
 .../1.2.1/services/GANGLIA/metainfo.xml         |  40 --
 .../HBASE/configuration/hbase-policy.xml        |  53 --
 .../services/HBASE/configuration/hbase-site.xml | 345 -----------
 .../HDPLocal/1.2.1/services/HBASE/metainfo.xml  |  40 --
 .../1.2.1/services/HCATALOG/metainfo.xml        |  30 -
 .../services/HDFS/configuration/core-site.xml   | 251 --------
 .../HDFS/configuration/hadoop-policy.xml        | 134 -----
 .../services/HDFS/configuration/hdfs-site.xml   | 415 --------------
 .../HDPLocal/1.2.1/services/HDFS/metainfo.xml   |  46 --
 .../services/HIVE/configuration/hive-site.xml   | 138 -----
 .../HDPLocal/1.2.1/services/HIVE/metainfo.xml   |  43 --
 .../configuration/capacity-scheduler.xml        | 195 -------
 .../MAPREDUCE/configuration/core-site.xml       |  20 -
 .../configuration/mapred-queue-acls.xml         |  39 --
 .../MAPREDUCE/configuration/mapred-site.xml     | 531 -----------------
 .../1.2.1/services/MAPREDUCE/metainfo.xml       |  41 --
 .../HDPLocal/1.2.1/services/NAGIOS/metainfo.xml |  30 -
 .../services/OOZIE/configuration/oozie-site.xml | 245 --------
 .../HDPLocal/1.2.1/services/OOZIE/metainfo.xml  |  35 --
 .../services/PIG/configuration/pig.properties   |  52 --
 .../HDPLocal/1.2.1/services/PIG/metainfo.xml    |  30 -
 .../HDPLocal/1.2.1/services/SQOOP/metainfo.xml  |  30 -
 .../WEBHCAT/configuration/webhcat-site.xml      | 126 ----
 .../1.2.1/services/WEBHCAT/metainfo.xml         |  31 -
 .../1.2.1/services/ZOOKEEPER/metainfo.xml       |  35 --
 .../stacks/HDPLocal/1.3.0/metainfo.xml          |  22 -
 .../stacks/HDPLocal/1.3.0/repos/repoinfo.xml    |  75 ---
 .../services/FLUME/configuration/global.xml     |  24 -
 .../HDPLocal/1.3.0/services/FLUME/metainfo.xml  |  30 -
 .../services/GANGLIA/configuration/global.xml   |  55 --
 .../1.3.0/services/GANGLIA/metainfo.xml         |  40 --
 .../services/HBASE/configuration/global.xml     | 160 ------
 .../HBASE/configuration/hbase-policy.xml        |  53 --
 .../services/HBASE/configuration/hbase-site.xml | 367 ------------
 .../HDPLocal/1.3.0/services/HBASE/metainfo.xml  |  40 --
 .../services/HCATALOG/configuration/global.xml  |  45 --
 .../1.3.0/services/HCATALOG/metainfo.xml        |  30 -
 .../services/HDFS/configuration/core-site.xml   | 253 --------
 .../services/HDFS/configuration/global.xml      | 187 ------
 .../HDFS/configuration/hadoop-policy.xml        | 134 -----
 .../services/HDFS/configuration/hdfs-site.xml   | 454 ---------------
 .../HDPLocal/1.3.0/services/HDFS/metainfo.xml   |  46 --
 .../services/HIVE/configuration/global.xml      | 125 ----
 .../services/HIVE/configuration/hive-site.xml   | 243 --------
 .../HDPLocal/1.3.0/services/HIVE/metainfo.xml   |  43 --
 .../1.3.0/services/HUE/configuration/global.xml |  35 --
 .../services/HUE/configuration/hue-site.xml     | 290 ----------
 .../HDPLocal/1.3.0/services/HUE/metainfo.xml    |  31 -
 .../configuration/capacity-scheduler.xml        | 195 -------
 .../MAPREDUCE/configuration/core-site.xml       |  20 -
 .../services/MAPREDUCE/configuration/global.xml | 160 ------
 .../configuration/mapred-queue-acls.xml         |  39 --
 .../MAPREDUCE/configuration/mapred-site.xml     | 537 -----------------
 .../1.3.0/services/MAPREDUCE/metainfo.xml       |  41 --
 .../services/NAGIOS/configuration/global.xml    |  50 --
 .../HDPLocal/1.3.0/services/NAGIOS/metainfo.xml |  30 -
 .../services/OOZIE/configuration/global.xml     | 105 ----
 .../services/OOZIE/configuration/oozie-site.xml | 237 --------
 .../HDPLocal/1.3.0/services/OOZIE/metainfo.xml  |  35 --
 .../services/PIG/configuration/pig.properties   |  52 --
 .../HDPLocal/1.3.0/services/PIG/metainfo.xml    |  30 -
 .../HDPLocal/1.3.0/services/SQOOP/metainfo.xml  |  30 -
 .../WEBHCAT/configuration/webhcat-site.xml      | 126 ----
 .../1.3.0/services/WEBHCAT/metainfo.xml         |  31 -
 .../services/ZOOKEEPER/configuration/global.xml |  75 ---
 .../1.3.0/services/ZOOKEEPER/metainfo.xml       |  35 --
 .../stacks/HDPLocal/1.3.2/metainfo.xml          |  22 -
 .../stacks/HDPLocal/1.3.2/repos/repoinfo.xml    |  75 ---
 .../services/FLUME/configuration/global.xml     |  24 -
 .../HDPLocal/1.3.2/services/FLUME/metainfo.xml  |  30 -
 .../services/GANGLIA/configuration/global.xml   |  55 --
 .../1.3.2/services/GANGLIA/metainfo.xml         |  40 --
 .../services/HBASE/configuration/global.xml     | 160 ------
 .../HBASE/configuration/hbase-policy.xml        |  53 --
 .../services/HBASE/configuration/hbase-site.xml | 367 ------------
 .../HDPLocal/1.3.2/services/HBASE/metainfo.xml  |  40 --
 .../services/HCATALOG/configuration/global.xml  |  45 --
 .../1.3.2/services/HCATALOG/metainfo.xml        |  30 -
 .../services/HDFS/configuration/core-site.xml   | 253 --------
 .../services/HDFS/configuration/global.xml      | 187 ------
 .../HDFS/configuration/hadoop-policy.xml        | 134 -----
 .../services/HDFS/configuration/hdfs-site.xml   | 454 ---------------
 .../HDPLocal/1.3.2/services/HDFS/metainfo.xml   |  46 --
 .../services/HIVE/configuration/global.xml      | 125 ----
 .../services/HIVE/configuration/hive-site.xml   | 230 --------
 .../HDPLocal/1.3.2/services/HIVE/metainfo.xml   |  43 --
 .../1.3.2/services/HUE/configuration/global.xml |  35 --
 .../services/HUE/configuration/hue-site.xml     | 290 ----------
 .../HDPLocal/1.3.2/services/HUE/metainfo.xml    |  31 -
 .../configuration/capacity-scheduler.xml        | 195 -------
 .../MAPREDUCE/configuration/core-site.xml       |  20 -
 .../services/MAPREDUCE/configuration/global.xml | 160 ------
 .../configuration/mapred-queue-acls.xml         |  39 --
 .../MAPREDUCE/configuration/mapred-site.xml     | 574 -------------------
 .../1.3.2/services/MAPREDUCE/metainfo.xml       |  41 --
 .../services/NAGIOS/configuration/global.xml    |  50 --
 .../HDPLocal/1.3.2/services/NAGIOS/metainfo.xml |  30 -
 .../services/OOZIE/configuration/global.xml     | 105 ----
 .../services/OOZIE/configuration/oozie-site.xml | 237 --------
 .../HDPLocal/1.3.2/services/OOZIE/metainfo.xml  |  35 --
 .../services/PIG/configuration/pig.properties   |  52 --
 .../HDPLocal/1.3.2/services/PIG/metainfo.xml    |  30 -
 .../HDPLocal/1.3.2/services/SQOOP/metainfo.xml  |  30 -
 .../WEBHCAT/configuration/webhcat-site.xml      | 126 ----
 .../1.3.2/services/WEBHCAT/metainfo.xml         |  31 -
 .../services/ZOOKEEPER/configuration/global.xml |  75 ---
 .../1.3.2/services/ZOOKEEPER/metainfo.xml       |  35 --
 .../stacks/HDPLocal/1.3.3/metainfo.xml          |  22 -
 .../stacks/HDPLocal/1.3.3/repos/repoinfo.xml    |  75 ---
 .../services/FLUME/configuration/global.xml     |  24 -
 .../HDPLocal/1.3.3/services/FLUME/metainfo.xml  |  30 -
 .../services/GANGLIA/configuration/global.xml   |  55 --
 .../1.3.3/services/GANGLIA/metainfo.xml         |  40 --
 .../services/HBASE/configuration/global.xml     | 160 ------
 .../HBASE/configuration/hbase-policy.xml        |  53 --
 .../services/HBASE/configuration/hbase-site.xml | 367 ------------
 .../HDPLocal/1.3.3/services/HBASE/metainfo.xml  |  40 --
 .../services/HCATALOG/configuration/global.xml  |  45 --
 .../1.3.3/services/HCATALOG/metainfo.xml        |  30 -
 .../services/HDFS/configuration/core-site.xml   | 253 --------
 .../services/HDFS/configuration/global.xml      | 187 ------
 .../HDFS/configuration/hadoop-policy.xml        | 134 -----
 .../services/HDFS/configuration/hdfs-site.xml   | 454 ---------------
 .../HDPLocal/1.3.3/services/HDFS/metainfo.xml   |  46 --
 .../services/HIVE/configuration/global.xml      | 125 ----
 .../services/HIVE/configuration/hive-site.xml   | 230 --------
 .../HDPLocal/1.3.3/services/HIVE/metainfo.xml   |  43 --
 .../1.3.3/services/HUE/configuration/global.xml |  35 --
 .../services/HUE/configuration/hue-site.xml     | 290 ----------
 .../HDPLocal/1.3.3/services/HUE/metainfo.xml    |  31 -
 .../configuration/capacity-scheduler.xml        | 195 -------
 .../MAPREDUCE/configuration/core-site.xml       |  20 -
 .../services/MAPREDUCE/configuration/global.xml | 160 ------
 .../configuration/mapred-queue-acls.xml         |  39 --
 .../MAPREDUCE/configuration/mapred-site.xml     | 573 ------------------
 .../1.3.3/services/MAPREDUCE/metainfo.xml       |  41 --
 .../services/NAGIOS/configuration/global.xml    |  50 --
 .../HDPLocal/1.3.3/services/NAGIOS/metainfo.xml |  30 -
 .../services/OOZIE/configuration/global.xml     | 105 ----
 .../services/OOZIE/configuration/oozie-site.xml | 237 --------
 .../HDPLocal/1.3.3/services/OOZIE/metainfo.xml  |  35 --
 .../services/PIG/configuration/pig.properties   |  52 --
 .../HDPLocal/1.3.3/services/PIG/metainfo.xml    |  30 -
 .../HDPLocal/1.3.3/services/SQOOP/metainfo.xml  |  30 -
 .../WEBHCAT/configuration/webhcat-site.xml      | 126 ----
 .../1.3.3/services/WEBHCAT/metainfo.xml         |  31 -
 .../services/ZOOKEEPER/configuration/global.xml |  75 ---
 .../1.3.3/services/ZOOKEEPER/metainfo.xml       |  35 --
 .../stacks/HDPLocal/2.0.5/metainfo.xml          |  21 -
 .../stacks/HDPLocal/2.0.5/repos/repoinfo.xml    |  61 --
 .../2.0.5/services/GANGLIA/metainfo.xml         |  36 --
 .../services/HBASE/configuration/global.xml     | 160 ------
 .../HBASE/configuration/hbase-policy.xml        |  53 --
 .../services/HBASE/configuration/hbase-site.xml | 364 ------------
 .../HDPLocal/2.0.5/services/HBASE/metainfo.xml  |  40 --
 .../2.0.5/services/HCATALOG/metainfo.xml        |  30 -
 .../services/HDFS/configuration/core-site.xml   | 167 ------
 .../services/HDFS/configuration/global.xml      | 192 -------
 .../HDFS/configuration/hadoop-policy.xml        | 134 -----
 .../services/HDFS/configuration/hdfs-site.xml   | 462 ---------------
 .../HDPLocal/2.0.5/services/HDFS/metainfo.xml   |  57 --
 .../services/HIVE/configuration/hive-site.xml   | 261 ---------
 .../HDPLocal/2.0.5/services/HIVE/metainfo.xml   |  43 --
 .../configuration/container-executor.cfg        |  20 -
 .../MAPREDUCE2/configuration/core-site.xml      |  20 -
 .../MAPREDUCE2/configuration/global.xml         |  44 --
 .../configuration/mapred-queue-acls.xml         |  39 --
 .../MAPREDUCE2/configuration/mapred-site.xml    | 392 -------------
 .../2.0.5/services/MAPREDUCE2/metainfo.xml      |  32 --
 .../HDPLocal/2.0.5/services/NAGIOS/metainfo.xml |  30 -
 .../services/OOZIE/configuration/oozie-site.xml | 245 --------
 .../HDPLocal/2.0.5/services/OOZIE/metainfo.xml  |  35 --
 .../services/PIG/configuration/pig.properties   |  52 --
 .../HDPLocal/2.0.5/services/PIG/metainfo.xml    |  30 -
 .../HDPLocal/2.0.5/services/SQOOP/metainfo.xml  |  30 -
 .../WEBHCAT/configuration/webhcat-site.xml      | 126 ----
 .../2.0.5/services/WEBHCAT/metainfo.xml         |  31 -
 .../YARN/configuration/capacity-scheduler.xml   | 120 ----
 .../YARN/configuration/container-executor.cfg   |  20 -
 .../services/YARN/configuration/core-site.xml   |  20 -
 .../services/YARN/configuration/global.xml      |  64 ---
 .../services/YARN/configuration/yarn-site.xml   | 312 ----------
 .../HDPLocal/2.0.5/services/YARN/metainfo.xml   |  36 --
 .../services/ZOOKEEPER/configuration/global.xml |  75 ---
 .../2.0.5/services/ZOOKEEPER/metainfo.xml       |  35 --
 .../stacks/HDPLocal/2.0.6/metainfo.xml          |  22 -
 .../stacks/HDPLocal/2.0.6/repos/repoinfo.xml    |  75 ---
 .../2.0.6/services/GANGLIA/metainfo.xml         |  36 --
 .../services/HBASE/configuration/global.xml     | 160 ------
 .../HBASE/configuration/hbase-policy.xml        |  53 --
 .../services/HBASE/configuration/hbase-site.xml | 356 ------------
 .../HDPLocal/2.0.6/services/HBASE/metainfo.xml  |  44 --
 .../2.0.6/services/HCATALOG/metainfo.xml        |  30 -
 .../services/HDFS/configuration/core-site.xml   | 167 ------
 .../services/HDFS/configuration/global.xml      | 192 -------
 .../HDFS/configuration/hadoop-policy.xml        | 134 -----
 .../services/HDFS/configuration/hdfs-site.xml   | 484 ----------------
 .../HDPLocal/2.0.6/services/HDFS/metainfo.xml   |  60 --
 .../services/HIVE/configuration/hive-site.xml   | 260 ---------
 .../HDPLocal/2.0.6/services/HIVE/metainfo.xml   |  45 --
 .../configuration/container-executor.cfg        |  20 -
 .../MAPREDUCE2/configuration/core-site.xml      |  20 -
 .../MAPREDUCE2/configuration/global.xml         |  44 --
 .../configuration/mapred-queue-acls.xml         |  39 --
 .../MAPREDUCE2/configuration/mapred-site.xml    | 379 ------------
 .../2.0.6/services/MAPREDUCE2/metainfo.xml      |  38 --
 .../HDPLocal/2.0.6/services/NAGIOS/metainfo.xml |  30 -
 .../services/OOZIE/configuration/oozie-site.xml | 313 ----------
 .../HDPLocal/2.0.6/services/OOZIE/metainfo.xml  |  38 --
 .../services/PIG/configuration/pig.properties   |  52 --
 .../HDPLocal/2.0.6/services/PIG/metainfo.xml    |  30 -
 .../HDPLocal/2.0.6/services/SQOOP/metainfo.xml  |  30 -
 .../WEBHCAT/configuration/webhcat-site.xml      | 129 -----
 .../2.0.6/services/WEBHCAT/metainfo.xml         |  31 -
 .../YARN/configuration/capacity-scheduler.xml   | 120 ----
 .../YARN/configuration/container-executor.cfg   |  20 -
 .../services/YARN/configuration/core-site.xml   |  20 -
 .../services/YARN/configuration/global.xml      |  64 ---
 .../services/YARN/configuration/yarn-site.xml   | 326 -----------
 .../HDPLocal/2.0.6/services/YARN/metainfo.xml   |  42 --
 .../services/ZOOKEEPER/configuration/global.xml |  75 ---
 .../2.0.6/services/ZOOKEEPER/metainfo.xml       |  35 --
 249 files changed, 27880 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/repos/repoinfo.xml
deleted file mode 100644
index 8bf9a78..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/repos/repoinfo.xml
+++ /dev/null
@@ -1,87 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-  </os>
-    <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
-      <repoid>HDP-1.2.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
-      <repoid>HDP-UTILS-1.1.0.15</repoid>
-      <reponame>HDP-UTILS</reponame>
-      <mirrorslist></mirrorslist>
-    </repo>
-  </os>
-</reposinfo>
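
For reference, each <os> block in repoinfo.xml binds one OS family to the
repositories Ambari registers on its hosts. A minimal sketch of a single
entry (the mirror hostname below is hypothetical):

  <os type="centos6">
    <repo>
      <!-- root of the package repository for this OS family; hypothetical mirror -->
      <baseurl>http://mirror.example.com/HDP-1.2.0/repos/centos6</baseurl>
      <repoid>HDP-1.2.0</repoid>
      <reponame>HDP</reponame>
    </repo>
  </os>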

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 0b21f0f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>
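
Each service metainfo.xml in this stack follows the same shape: a run-as
user, a comment, a version, and a list of components categorized as MASTER,
SLAVE, or CLIENT. A minimal sketch for a hypothetical service:

  <metainfo>
      <user>root</user>
      <comment>Example service</comment>
      <version>1.0.0</version>
      <components>
          <component>
              <!-- hypothetical component name; category must be MASTER, SLAVE, or CLIENT -->
              <name>EXAMPLE_SERVER</name>
              <category>MASTER</category>
          </component>
      </components>
  </metainfo>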

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (ie. 
-    clients talking to HRegionServers)
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (ie. 
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster)
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
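
The ACL format described above lists users before a single blank and groups
after it. A sketch of a restricted (non-wildcard) value, reusing the example
names from the property descriptions:

  <property>
    <name>security.client.protocol.acl</name>
    <!-- users alice and bob, plus members of groups users and wheel -->
    <value>alice,bob users,wheel</value>
  </property>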

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index c4b3651..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,334 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value></value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration, or else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value></value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value></value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value></value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value></value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value></value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value></value>
-    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing
-    runaway memstore growth during spikes in update traffic.  Without an
-    upper bound, the memstore fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or,
-    worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value></value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value></value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value></value>
-    <description>
-    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value></value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory
-    and some calls of next may take longer and longer times when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value></value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value></value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since a single entry cannot be split, this helps avoid a
-    region becoming unsplittable because the data is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value></value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles files as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value></value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value></value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable, but that is not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value></value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>Does HDFS allow appends to files?
-    This is an HDFS setting, placed here so that the HDFS client enables
-    append support. You must ensure that this setting is also true on the
-    server side when running HBase (you will have to restart your cluster
-    after setting it).
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value></value>
-    <description>Enable/Disable short circuit read for your client.
-    Hadoop servers should be configured to allow short circuit read
-    for the hbase user for this to take effect
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check</description>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-      Amount of time to wait since the last time a region was flushed before
-      invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-
-</configuration>
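
As the hbase.rootdir description notes, the value must be a fully-qualified
URL including the filesystem scheme. A sketch using the example hostname
from that description:

  <property>
    <name>hbase.rootdir</name>
    <!-- fully qualified, so HBase does not fall back to writing into /tmp -->
    <value>hdfs://namenode.example.org:9000/hbase</value>
  </property>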

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/metainfo.xml
deleted file mode 100644
index c91d9f0..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.2</version>
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 1951a5d..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for HCATALOG service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index a3f8137..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,251 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value></value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directories then the edits are
-        replicated in all of the directories for redundancy.
-        Default value is same as fs.checkpoint.dir
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>536870912</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to the public. Enable this option only if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value></value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-<description>The mapping from kerberos principal names to local OS user names.
-  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number (the number of components in the principal name, excluding the realm) and a pattern for building the name from the sections of the principal name. The pattern uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
-  </property>
-
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
-</configuration>
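
Putting the auth_to_local description together, a complete value is a
newline-separated list of RULE lines ending in DEFAULT. A sketch that strips
the realm from single-component ACME.COM principals, per the example worked
through in the description:

  <property>
    <name>hadoop.security.auth_to_local</name>
    <value>
      RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
      DEFAULT
    </value>
  </property>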

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status, etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value></value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index db92d4b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,415 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value></value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
- <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value></value>
-    <description>Number of failed disks the DataNode would tolerate</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value></value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem an DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value></value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for the balancing purpose in term of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value></value>
-<description>The address and the base port on which the NameNode web UI
-listens.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value></value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>4096</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>077</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow Queue size so that more client connections are allowed</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value></value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value></value>
-    <description>
-        Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value></value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
- <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
- <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value></value>
-  <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value></value>
-<description>The permissions that should be set on dfs.data.dir
-directories. The datanode will not come up if the permissions are
-different on existing dfs.data.dir directories. If the directories
-don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-  <name>dfs.access.time.precision</name>
-  <value>0</value>
-  <description>The access time for an HDFS file is precise up to this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
-
-<property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL for who all can view the default servlets in the HDFS</description>
-</property>
-
-<property>
-  <name>ipc.server.read.threadpool.size</name>
-  <value>5</value>
-  <description></description>
-</property>
-
-<property>
-  <name>dfs.datanode.failed.volumes.tolerated</name>
-  <value>0</value>
-  <description>Number of failed disks datanode would tolerate</description>
-</property>
-
-</configuration>
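
dfs.name.dir and dfs.data.dir both take comma-delimited directory lists,
with different semantics: the name table is replicated into every listed
directory, while data blocks are spread across them. A sketch with
hypothetical mount points:

  <property>
    <name>dfs.data.dir</name>
    <!-- blocks are distributed across these directories, typically on different devices -->
    <value>/hadoop/hdfs/data1,/hadoop/hdfs/data2</value>
  </property>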

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/metainfo.xml
deleted file mode 100644
index 1b185e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 7d35558..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,138 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>hive.metastore.local</name>
-    <value>false</value>
-    <description>controls whether to connect to a remote metastore server or
-    open a new metastore server in the Hive client JVM</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value></value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value></value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value></value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hivealog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hadoop.clientside.fs.operations</name>
-    <value>true</value>
-    <description>FS operations are owned by client</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true causes the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting is ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>true</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/metainfo.xml
deleted file mode 100644
index 6a52064..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.10.0</version>
-
-    <components>        
-        <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>


[04/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

Posted by yu...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HCATALOG/metainfo.xml
deleted file mode 100644
index a2f075b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the HCATALOG service</comment>
-    <version>0.11.0.2.0.5.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index c2f0411..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,167 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.defaultFS</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to the public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value>false</value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>
-        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
-        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
-        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
-        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
-        DEFAULT
-    </value>
-<description>The mapping from kerberos principal names to local OS user names.
-  The default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both become "omalley" if your default domain is APACHE.ORG.
-The translation rules have 3 sections:
-      base     filter    substitution
-The base consists of a number, which is the number of components in the principal name excluding the realm, and a pattern for building the name from the sections of the principal name. The pattern uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule that translates a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component, such as "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
-  </property>
-
-</configuration>
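
As a rough illustration of the rule grammar described in the deleted hadoop.security.auth_to_local property above, here is a minimal Python sketch of how a single RULE line is applied to a Kerberos principal. It is a sketch under stated assumptions, not Hadoop's actual KerberosName implementation (DEFAULT handling, rule chaining, and error cases are omitted); the rule string and principal in the example come from the file above.

import re

# Assumed grammar, distilled from the description above:
#   RULE:[<n>:<format>](<filter>)s/<pattern>/<replacement>/<g?>
RULE_RE = re.compile(r'RULE:\[(\d+):([^\]]+)\]'
                     r'(?:\(([^)]*)\))?'
                     r'(?:s/([^/]*)/([^/]*)/(g?))?$')

def apply_rule(rule, principal):
    """Apply one auth_to_local RULE to a principal like user/host@REALM.
    Returns the short name, or None when the rule does not apply
    (Hadoop would then fall through to the next rule or DEFAULT)."""
    m = RULE_RE.match(rule)
    if not m:
        return None
    n, fmt, flt, pat, repl, g = m.groups()
    name, _, realm = principal.partition('@')
    parts = name.split('/')
    if len(parts) != int(n):              # base: component count must match
        return None
    s = fmt.replace('$0', realm)          # build the candidate string
    for i, part in enumerate(parts, start=1):
        s = s.replace('$%d' % i, part)
    if flt and not re.fullmatch(flt, s):  # filter: regex must match
        return None
    if pat:                               # substitution: sed-style s///
        s = re.sub(pat, repl, s, count=0 if g else 1)
    return s

# One of the rules from the deleted core-site.xml:
print(apply_rule(r'RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/',
                 'rm/host1.example.com@EXAMPLE.COM'))      # -> yarn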

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/global.xml
deleted file mode 100644
index 59b68ac..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/global.xml
+++ /dev/null
@@ -1,192 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>namenode_host</name>
-    <value></value>
-    <description>NameNode Host.</description>
-  </property>
-  <property>
-    <name>dfs_namenode_name_dir</name>
-    <value>/hadoop/hdfs/namenode</value>
-    <description>NameNode Directories.</description>
-  </property>
-  <property>
-    <name>snamenode_host</name>
-    <value></value>
-    <description>Secondary NameNode host.</description>
-  </property>
-  <property>
-    <name>dfs_namenode_checkpoint_dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Secondary NameNode checkpoint dir.</description>
-  </property>
-  <property>
-    <name>datanode_hosts</name>
-    <value></value>
-    <description>List of Datanode Hosts.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Data directories for Data Nodes.</description>
-  </property>
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>dfs_webhdfs_enabled</name>
-    <value>true</value>
-    <description>WebHDFS enabled</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>640</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>datanode_du_reserved</name>
-    <value>1073741824</value>
-    <description>Reserved space for HDFS</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>dfs_datanode_failed_volume_tolerated</name>
-    <value>0</value>
-    <description>DataNode volumes failure toleration</description>
-  </property>
-  <property>
-    <name>dfs_namenode_checkpoint_period</name>
-    <value>21600</value>
-    <description>HDFS Maximum Checkpoint Delay</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_size</name>
-    <value>0.5</value>
-    <description>FS Checkpoint Size.</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-  <property>
-    <name>dfs_exclude</name>
-    <value></value>
-    <description>HDFS Exclude hosts.</description>
-  </property>
-  <property>
-    <name>dfs_replication</name>
-    <value>3</value>
-    <description>Default Block Replication.</description>
-  </property>
-  <property>
-    <name>dfs_block_local_path_access_user</name>
-    <value>hbase</value>
-    <description>User granted block-local path access (used by HBase for short-circuit reads).</description>
-  </property>
-  <property>
-    <name>dfs_datanode_address</name>
-    <value>50010</value>
-    <description>Port for datanode address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_http_address</name>
-    <value>50075</value>
-    <description>Port for datanode HTTP address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir_perm</name>
-    <value>750</value>
-    <description>Datanode dir perms.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-  <property>
-    <name>kadmin_pw</name>
-    <value></value>
-    <description>Kerberos realm admin password</description>
-  </property>
-  <property>
-    <name>keytab_path</name>
-    <value>/etc/security/keytabs</value>
-    <description>Kerberos keytab path.</description>
-  </property>
-    <property>
-    <name>namenode_formatted_mark_dir</name>
-    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
-    <description>Formatted Mark Directory.</description>
-  </property>
-    <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>HDFS service user.</description>
-  </property>
-  <property>
-    <name>lzo_enabled</name>
-    <value>true</value>
-    <description>LZO compression enabled</description>
-  </property>
-  
-</configuration>
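
The namenode_heapsize / namenode_opt_newsize / namenode_opt_maxnewsize globals deleted above are not consumed by Hadoop directly; Ambari templates values like these into JVM flags via hadoop-env. A hedged sketch of that mapping (the exact template wording is an assumption, not quoted from the stack):

def namenode_jvm_opts(globals_cfg):
    """Render NameNode heap flags from Ambari 'global' values.
    A sketch: the real hadoop-env template text is an assumption."""
    return ('-Xmx{namenode_heapsize}m '
            '-XX:NewSize={namenode_opt_newsize}m '
            '-XX:MaxNewSize={namenode_opt_maxnewsize}m').format(**globals_cfg)

# With the defaults from the deleted file:
print(namenode_jvm_opts({'namenode_heapsize': 1024,
                         'namenode_opt_newsize': 200,
                         'namenode_opt_maxnewsize': 640}))
# -Xmx1024m -XX:NewSize=200m -XX:MaxNewSize=640m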

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 51b01bb..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status, etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    user-to-groups mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank,
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>
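
All of the ACLs above share one format: a comma-separated user list and a comma-separated group list, separated by a single blank, with "*" meaning everyone; a leading blank (as in the dfs.cluster.administrators value " hdfs" elsewhere in this stack) means the user list is empty. A small sketch of that check under those assumptions, not the actual org.apache.hadoop.security.authorize.AccessControlList code:

def acl_allows(acl, user, groups):
    """Check a "user1,user2 group1,group2" service-level ACL (a sketch)."""
    if acl.strip() == '*':                 # "*" allows all users
        return True
    users_part, _, groups_part = acl.partition(' ')
    allowed_users = {u for u in users_part.split(',') if u}
    allowed_groups = {g for g in groups_part.split(',') if g}
    return user in allowed_users or bool(allowed_groups & set(groups))

print(acl_allows('*', 'alice', []))                             # True
print(acl_allows('alice,bob users,wheel', 'carol', ['wheel']))  # True (group)
print(acl_allows('hadoop', 'carol', ['users']))                 # False
print(acl_allows(' hdfs', 'carol', ['hdfs']))                   # True: leading
                                                                # blank = empty
                                                                # user list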

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 252e84c..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,462 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a DataNode will tolerate</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem a DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-<!--
-  <property>
-    <name>dfs.hosts</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
--->
-
-  <property>
-    <name>dfs.namenode.checkpoint.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>${dfs.namenode.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      The default value is the same as dfs.namenode.checkpoint.dir
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value></value>
-    <description>The number of seconds between two periodic checkpoints.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.namenode.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing purposes, in terms of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50070</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-  </property>
-
-  <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.http-address</name>
-    <value></value>
-    <description>The address and the base port on which the dfs NameNode
-    web UI will listen.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>1024</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow Queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.namenode.secondary.http-address</name>
-    <value></value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
- <description>
-        The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.https-address</name>
-    <value></value>
-  <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be set on dfs.datanode.data.dir
-    directories. The datanode will not come up if the permissions are
-    different on existing dfs.datanode.data.dir directories. If the directories
-    don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-                 The default value is 1 hour. Setting a value of 0 disables
-                 access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-   <name>dfs.cluster.administrators</name>
-   <value> hdfs</value>
-   <description>ACL determining who can view the default servlets in HDFS</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes exceeds
-      this ratio, stop avoiding writes to stale nodes so as to prevent hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
-  </property>
-
-  <!-- HDFS Short-Circuit Local Reads -->
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>
-      This configuration parameter turns on short-circuit local reads.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check</description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
-    <value>4096</value>
-    <description>
-      The DFSClient maintains a cache of recently opened file descriptors. This
-      parameter controls the size of that cache. Setting this higher will use
-      more file descriptors, but potentially provide better performance on
-      workloads involving lots of seeks.
-    </description>
-  </property>
-
-</configuration>
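
Among the deleted hdfs-site properties, dfs.namenode.safemode.threshold-pct has slightly unusual edge cases (values <= 0 and > 1). A small sketch of the check exactly as the description states it — not the NameNode's actual SafeModeInfo logic, and the zero-blocks case is an assumption:

def past_safemode_threshold(reported_blocks, total_blocks, threshold_pct=1.0):
    """dfs.namenode.safemode.threshold-pct as described above:
    <= 0 means never wait in safe mode; > 1 makes safe mode permanent."""
    if threshold_pct <= 0:
        return True
    if total_blocks == 0:
        return True   # assumption: an empty namespace has nothing to wait for
    return reported_blocks >= threshold_pct * total_blocks

print(past_safemode_threshold(999, 1000))       # False: 99.9% < 100%
print(past_safemode_threshold(1000, 1000))      # True
print(past_safemode_threshold(500, 1000, 1.5))  # False; a pct > 1 is never met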

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/metainfo.xml
deleted file mode 100644
index f6f7586..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,57 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>2.1.0.2.0.5.0</version>
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-        
-        <component>
-            <name>JOURNALNODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-          <name>ZKFC</name>
-          <category>SLAVE</category>
-        </component>
-        
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index f8ad94a..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,261 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>hive.metastore.local</name>
-    <value>false</value>
-    <description>controls whether to connect to a remote metastore server or
-    to open a new metastore server in the Hive Client JVM</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value></value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value></value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value></value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hivealog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hadoop.clientside.fs.operations</name>
-    <value>true</value>
-    <description>FS operations are owned by client</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true causes the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting is ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization that converts a common
-      join into a mapjoin based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Whether the join will be automatically converted to a sort-merge join if the joined tables pass
-      the criteria for a sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization that converts a common join into a mapjoin based on the input file
-      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than the
-      specified size, the join is directly converted to a mapjoin (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
-      converted to a mapjoin (there is no conditional task). Hive's own default is 10MB.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
-    <description>Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.
-      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can produce a very slow, single-reducer MR job.
-      The optimization will be disabled if number of reducers is less than specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and if there are map-join jobs followed by a map-reduce
-      job (e.g., a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      How many values in each key of the map-joined table should be cached
-      in memory.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.execution.enabled</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.index.filter</name>
-    <value>true</value>
-    <description>
-    Whether to enable automatic use of indexes
-    </description>
-  </property>
-
-</configuration>
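
The hive.auto.convert.join.noconditionaltask.size description above amounts to a simple size test. A minimal sketch of that decision rule, assuming the n-1 smaller inputs are the ones checked — this mirrors the description, not Hive's actual planner code:

def converts_to_mapjoin(input_sizes_bytes, threshold=1000000000):
    """Sketch of the noconditionaltask.size rule described above: an n-way
    join converts directly to a mapjoin when the n-1 smaller inputs fit
    under the threshold (the largest table is streamed)."""
    smaller = sorted(input_sizes_bytes)[:-1]   # everything but the largest
    return sum(smaller) < threshold

# e.g. 50 MB and 200 MB dimension tables joined against an 8 GB fact table:
print(converts_to_mapjoin([50 << 20, 200 << 20, 8 << 30]))  # True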

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HIVE/metainfo.xml
deleted file mode 100644
index 7c5ad04..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.11.0.2.0.5.0</version>
-
-    <components>        
-        <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/container-executor.cfg
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/container-executor.cfg b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/container-executor.cfg
deleted file mode 100644
index 502ddaa..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/container-executor.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
-yarn.nodemanager.linux-container-executor.group=hadoop
-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
-banned.users=hdfs,bin,0
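
banned.users above is a comma-separated deny list of accounts that may not
launch containers. A minimal Python sketch of the check it implies (the real
container-executor is a native binary; this is purely illustrative):

    cfg_line = "banned.users=hdfs,bin,0"
    banned = set(cfg_line.split("=", 1)[1].split(","))

    def may_launch(user):
        return user not in banned

    print(may_launch("hdfs"))   # False: system accounts are refused
    print(may_launch("alice"))  # True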

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/global.xml
deleted file mode 100644
index ceedd56..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/global.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hs_host</name>
-    <value></value>
-    <description>History Server.</description>
-  </property>
-  <property>
-    <name>mapred_log_dir_prefix</name>
-    <value>/var/log/hadoop-mapreduce</value>
-    <description>Mapreduce Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_pid_dir_prefix</name>
-    <value>/var/run/hadoop-mapreduce</value>
-    <description>Mapreduce PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>Mapreduce User</description>
-  </property>
-</configuration>
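
The two *_dir_prefix values above are conventionally joined with the service
user to form the effective directories; a minimal sketch, assuming the
prefix-plus-user layout:

    log_prefix = "/var/log/hadoop-mapreduce"
    pid_prefix = "/var/run/hadoop-mapreduce"
    user = "mapred"

    print(f"{log_prefix}/{user}")  # /var/log/hadoop-mapreduce/mapred
    print(f"{pid_prefix}/{user}")  # /var/run/hadoop-mapreduce/mapred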

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
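
In these ACLs the value "*" admits everyone; anything narrower is read as a
user list. A minimal sketch of that semantic (illustrative, not Hadoop's
implementation):

    def acl_allows(acl_value, user):
        if acl_value.strip() == "*":
            return True
        return user in acl_value.split(",")

    print(acl_allows("*", "alice"))          # True
    print(acl_allows("bob,carol", "alice"))  # False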

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
deleted file mode 100644
index 6ed35f1..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/configuration/mapred-site.xml
+++ /dev/null
@@ -1,392 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- i/o properties -->
-
-  <property>
-    <name>mapreduce.task.io.sort.mb</name>
-    <value>100</value>
-    <description>
-      The total amount of buffer memory to use while sorting files, in megabytes.
-      By default, gives each merge stream 1MB, which should minimize seeks.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.sort.spill.percent</name>
-    <value>0.1</value>
-    <description>
-      The soft limit in the serialization buffer. Once reached, a thread will
-      begin to spill the contents to disk in the background. Note that
-      collection will not block if this threshold is exceeded while a spill
-      is already in progress, so spills may be larger than this threshold when
-      it is set to less than .5
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.task.io.sort.factor</name>
-    <value>100</value>
-    <description>
-      The number of streams to merge at once while sorting files.
-      This determines the number of open file handles.
-    </description>
-  </property>
-
-  <!-- map/reduce properties -->
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-    <description>
-      Administrators for MapReduce applications.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.parallelcopies</name>
-    <value>30</value>
-    <description>
-      The default number of parallel transfers run by reduce during
-      the copy(shuffle) phase.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some map tasks
-      may be executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some reduce tasks may be
-      executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
-    <value>0.05</value>
-    <description>
-      Fraction of the number of maps in the job which should be complete before
-      reduces are scheduled for the job.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>
-      The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapreduce.reduce.shuffle.input.buffer.percent.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>
-      The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.output.compress.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress.type</name>
-    <value>BLOCK</value>
-    <description>
-      If the job outputs are to be compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>
-      The percentage of memory, relative to the maximum heap size, to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-  </property>
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapreduce.map.output.compress</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapreduce.task.timeout</name>
-    <value>600000</value>
-    <description>
-      The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.memory.mb</name>
-    <value>1536</value>
-    <description>Virtual memory for a single Map task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for a single Reduce task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.keytab.file</name>
-    <value></value>
-    <description>The filename of the keytab for the task tracker</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The keytab for the job history server principal.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.shuffle.port</name>
-    <value>13562</value>
-    <description>
-      Default port that the ShuffleHandler will run on.
-      ShuffleHandler is a service run at the NodeManager to facilitate
-      transfers of intermediate Map outputs to requesting Reducers.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.address</name>
-    <value>localhost:10020</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>localhost:19888</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.framework.name</name>
-    <value>yarn</value>
-    <description>
-      The runtime framework for executing MapReduce jobs. Can be one of local,
-      classic or yarn.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>/user</value>
-    <description>
-      The staging dir used while submitting jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.resource.mb</name>
-    <value>1024</value>
-    <description>The amount of memory the MR AppMaster needs.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.command-opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Java opts for the MR App Master processes.
-      The following symbol, if present, will be interpolated: @taskid@ is replaced
-      by the current TaskID. Any other occurrences of '@' will go unchanged.
-      For example, to enable verbose gc logging to a file named for the taskid in
-      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>
-      Java opts for the MR App Master processes for admin purposes.
-      It appears before the opts set by yarn.app.mapreduce.am.command-opts and
-      thus its options can be overridden by the user.
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.log.level</name>
-    <value>INFO</value>
-    <description>MR App Master process log level.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.env</name>
-    <value></value>
-    <description>
-      User added environment variables for the MR App Master
-      processes. Example :
-      1) A=foo  This will set the env variable A to foo
-      2) B=$B:c This will inherit the tasktracker's B env variable.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-  </property>
-
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It is an
-      application-specific setting. It should not be larger than the global number
-      set by the resourcemanager; otherwise, it will be overridden. The default is
-      set to 2, to allow at least one retry for the AM.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.memory.mb</name>
-    <value>512</value>
-    <description>
-      Larger resource limit for maps.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.java.opts</name>
-    <value>-Xmx320m</value>
-    <description>
-      Larger heap-size for child jvms of maps.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.memory.mb</name>
-    <value>1024</value>
-    <description>
-      Larger resource limit for reduces.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of reduces.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the map task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the reduce task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
-    <description>
-      Additional execution environment entries for map and reduce task processes.
-      This is not an additive property. You must preserve the original value if
-      you want your map and reduce tasks to have access to native libraries (compression, etc)
-    </description>
-  </property>
-
-</configuration>
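
A sizing convention runs through this file: each -Xmx heap is kept below its
container's memory.mb limit (-Xmx756m inside 1024 MB, -Xmx320m inside 512 MB),
leaving headroom for JVM overhead. A minimal sketch, assuming a ~0.75
heap-to-container ratio (an illustrative figure, not a Hadoop constant):

    def heap_for_container(container_mb, ratio=0.75):
        # Leave (1 - ratio) of the container for non-heap JVM memory.
        return int(container_mb * ratio)

    print(heap_for_container(1024))  # 768, in line with -Xmx756m
    print(heap_for_container(512))   # 384, vs the more conservative -Xmx320m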

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/metainfo.xml
deleted file mode 100644
index 363bf12..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/MAPREDUCE2/metainfo.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop NextGen MapReduce (client libraries)</comment>
-    <version>2.1.0.2.0.5.0</version>
-    <components>
-        <component>
-            <name>HISTORYSERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MAPREDUCE2_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 76471cf..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.5.0</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 4b1da1b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,245 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->     
-
-<configuration>
-
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-   </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled any user can manage Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc.).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means an infinite timeout.</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
-      Oozie DataBase Name
-     </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
-      Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
-      If set to true, submissions of MapReduce and Pig jobs will include
-      automatically the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
-        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        DEFAULT
-        </value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative, it is looked
-          up within the Oozie configuration directory; the path can also be absolute
-          (i.e. pointing to Hadoop client conf/ directories in the local filesystem).
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist; if the DB schema exists, this is a NOP.
-            If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>sa</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if the password is empty, leave a 1-space string; the service trims the value,
-                       and if empty, Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-</configuration>
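
The JDBC URL above interpolates ${oozie.data.dir} and ${oozie.db.schema.name}
at startup. A minimal sketch of that substitution, with /var/lib/oozie as an
assumed data dir (Oozie itself resolves these through its Configuration
machinery):

    props = {
        "oozie.data.dir": "/var/lib/oozie",  # assumed value
        "oozie.db.schema.name": "oozie",     # from oozie-site.xml above
    }
    url = "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true"
    for key, val in props.items():
        url = url.replace("${" + key + "}", val)
    print(url)  # jdbc:derby:/var/lib/oozie/oozie-db;create=true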

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/metainfo.xml
deleted file mode 100644
index 05e093d..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
-    <version>4.0.0.2.0.5.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>


[11/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

Posted by yu...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 31d0113..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value></value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Time out for templeton api</description>
-  </property>
-
-</configuration>
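
With templeton.port set to 50111, liveness can be checked over plain HTTP; a
minimal sketch against WebHCat's /templeton/v1/status endpoint (localhost is
an assumption):

    import json
    import urllib.request

    url = "http://localhost:50111/templeton/v1/status"
    try:
        with urllib.request.urlopen(url, timeout=5) as resp:
            print(json.load(resp))  # expected: {"status": "ok", "version": "v1"}
    except OSError as exc:
        print("WebHCat not reachable:", exc)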

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index e65992f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the WEBHCAT service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index fc0c3b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
-    <version>3.4.5</version>
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/metainfo.xml
deleted file mode 100644
index 45a63e5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/metainfo.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-	  <active>false</active>
-    </versions>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/repos/repoinfo.xml
deleted file mode 100644
index daad89d..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/repos/repoinfo.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/GA/1.3.0.0</baseurl>
-      <repoid>HDP-1.3.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/GA/1.3.0.0</baseurl>
-      <repoid>HDP-1.3.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/GA/1.3.0.0</baseurl>
-      <repoid>HDP-1.3.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/GA/1.3.0.0</baseurl>
-      <repoid>HDP-1.3.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/GA/1.3.0.0</baseurl>
-      <repoid>HDP-1.3.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/GA/1.3.0.0</baseurl>
-      <repoid>HDP-1.3.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/GA/1.3.0.0</baseurl>
-      <repoid>HDP-1.3.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-    <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/GA/1.3.0.0</baseurl>
-      <repoid>HDP-1.3.0</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-</reposinfo>
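
Each reposinfo.xml maps an os type to a repository baseurl. A minimal stdlib
sketch of that lookup (not Ambari's actual loader):

    import xml.etree.ElementTree as ET

    reposinfo = """<reposinfo>
      <os type="centos6">
        <repo>
          <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/GA/1.3.0.0</baseurl>
          <repoid>HDP-1.3.0</repoid>
          <reponame>HDP</reponame>
        </repo>
      </os>
    </reposinfo>"""

    root = ET.fromstring(reposinfo)
    for os_el in root.findall("os"):
        if os_el.get("type") == "centos6":
            print(os_el.findtext("repo/baseurl"))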

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/FLUME/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/FLUME/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/FLUME/configuration/global.xml
deleted file mode 100644
index f1fa4de..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/FLUME/configuration/global.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/FLUME/metainfo.xml
deleted file mode 100644
index 87ff4b3..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/FLUME/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Flume is a distributed, reliable, and available system for efficiently collecting, aggregating and moving large amounts of log data from many different sources to a centralized data store.</comment>
-    <version>1.3.1.1.3.0.0</version>
-
-    <components>
-        <component>
-            <name>FLUME_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/GANGLIA/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/GANGLIA/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/GANGLIA/configuration/global.xml
deleted file mode 100644
index 16df0b8..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/GANGLIA/configuration/global.xml
+++ /dev/null
@@ -1,55 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>ganglia_conf_dir</name>
-    <value>/etc/ganglia/hdp</value>
-    <description>Config directory for Ganglia</description>
-  </property>
-  <property>
-    <name>ganglia_runtime_dir</name>
-    <value>/var/run/ganglia/hdp</value>
-    <description>Run directories for Ganglia</description>
-  </property>
-  <property>
-    <name>gmetad_user</name>
-    <value>nobody</value>
-    <description>Gmetad user.</description>
-  </property>
-    <property>
-    <name>gmond_user</name>
-    <value>nobody</value>
-    <description>Gmond user.</description>
-  </property>
-  <property>
-    <name>rrdcached_base_dir</name>
-    <value>/var/lib/ganglia/rrds</value>
-    <description>Default directory for saving the rrd files on ganglia server</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 0b21f0f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/global.xml
deleted file mode 100644
index bc8a7d3..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hbasemaster_host</name>
-    <value></value>
-    <description>HBase Master Host.</description>
-  </property>
-  <property>
-    <name>regionserver_hosts</name>
-    <value></value>
-    <description>Region Server Hosts</description>
-  </property>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>PID Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-  <property>
-    <name>hstore_compactionthreshold</name>
-    <value>3</value>
-    <description>HBase HStore compaction threshold.</description>
-  </property>
-  <property>
-    <name>hfile_blockcache_size</name>
-    <value>0.25</value>
-    <description>HFile block cache size.</description>
-  </property>
-  <property>
-    <name>hstorefile_maxsize</name>
-    <value>1073741824</value>
-    <description>Maximum HStoreFile Size</description>
-  </property>
-  <property>
-    <name>regionserver_handlers</name>
-    <value>30</value>
-    <description>HBase RegionServer Handler Count.</description>
-  </property>
-  <property>
-    <name>hregion_majorcompaction</name>
-    <value>86400000</value>
-    <description>HBase Major Compaction Interval.</description>
-  </property>
-  <property>
-    <name>hregion_blockmultiplier</name>
-    <value>2</value>
-    <description>HBase Region Block Multiplier.</description>
-  </property>
-  <property>
-    <name>hregion_memstoreflushsize</name>
-    <value></value>
-    <description>HBase Region MemStore Flush Size.</description>
-  </property>
-  <property>
-    <name>client_scannercaching</name>
-    <value>100</value>
-    <description>HBase Client Scanner Caching.</description>
-  </property>
-  <property>
-    <name>zookeeper_sessiontimeout</name>
-    <value>60000</value>
-    <description>ZooKeeper Session Timeout.</description>
-  </property>
-  <property>
-    <name>hfile_max_keyvalue_size</name>
-    <value>10485760</value>
-    <description>HBase Client Maximum key-value Size.</description>
-  </property>
-  <property>
-    <name>hbase_hdfs_root_dir</name>
-    <value>/apps/hbase/data</value>
-    <description>HBase Relative Path to HDFS.</description>
-  </property>
-  <property>
-    <name>hbase_conf_dir</name>
-    <value>/etc/hbase</value>
-    <description>Config Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hdfs_enable_shortcircuit_read</name>
-    <value>true</value>
-    <description>HDFS Short Circuit Read.</description>
-  </property>
-  <property>
-    <name>hdfs_support_append</name>
-    <value>true</value>
-    <description>HDFS append support.</description>
-  </property>
-  <property>
-    <name>hstore_blockingstorefiles</name>
-    <value>7</value>
-    <description>HStore blocking storefiles.</description>
-  </property>
-  <property>
-    <name>regionserver_memstore_lab</name>
-    <value>true</value>
-    <description>Region Server MemStore-Local Allocation Buffer (MSLAB).</description>
-  </property>
-  <property>
-    <name>regionserver_memstore_lowerlimit</name>
-    <value>0.35</value>
-    <description>Region Server memstore lower limit.</description>
-  </property>
-  <property>
-    <name>regionserver_memstore_upperlimit</name>
-    <value>0.4</value>
-    <description>Region Server memstore upper limit.</description>
-  </property>
-  <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <description>HBase User Name.</description>
-  </property>
-
-</configuration>
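
The heap-size globals above are plain megabyte counts that the host-side
templates interpolate into hbase-env when rendering configuration. A hedged
sketch of overriding them for a larger deployment (the 4096/8192 figures are
illustrative, not tuning advice):

  <property>
    <name>hbase_master_heapsize</name>
    <value>4096</value>
    <description>HBase Master Heap Size in MB (illustrative override).</description>
  </property>
  <property>
    <name>hbase_regionserver_heapsize</name>
    <value>8192</value>
    <description>HBase RegionServer Heap Size in MB (illustrative override).</description>
  </property>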

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (ie. 
-    clients talking to HRegionServers)
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (ie. 
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster)
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
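
The ACL grammar these descriptions share is easy to misread: users come first,
then a single blank, then groups, with each list comma-separated. A worked
sketch that restricts the client protocol to two users plus one group (all
names hypothetical):

  <property>
    <name>security.client.protocol.acl</name>
    <value>alice,bob admins</value>
    <description>Only alice, bob, and members of admins may act as HBase clients.</description>
  </property>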

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index be9c023..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,367 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value></value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration, or else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value></value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value></value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value></value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value></value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value></value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value></value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value></value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value></value>
-    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing a
-    runaway memstore during spikes in update traffic.  Without an
-    upper-bound, the memstore fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or,
-    worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value></value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value></value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value></value>
-    <description>
-    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value></value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory
-    and some calls of next may take longer and longer times when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>30000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value></value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since such entries cannot be split, this helps avoid a region
-    that cannot be split any further because its data is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value></value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles files as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value></value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value></value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-  </property>
-
-  <property>
-    <name>hbase.rpc.engine</name>
-    <value>org.apache.hadoop.hbase.ipc.WritableRpcEngine</value>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value></value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>Does HDFS allow appends to files?
-    This is an hdfs config. set in here so the hdfs client will do append support.
-    You must ensure that this config. is true serverside too when running hbase
-    (You will have to restart your cluster after setting it).
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value></value>
-    <description>Enable/Disable short circuit read for your client.
-    Hadoop servers should be configured to allow short circuit read
-    for the hbase user for this to take effect
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check.</description>
-  </property>
-  
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase-unsecure</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file path are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-
-</configuration>
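
Most values in this file ship empty because Ambari fills them per cluster.
Read together, hbase.rootdir, hbase.cluster.distributed, and
hbase.zookeeper.quorum are the three that actually wire a distributed HBase
to HDFS and ZooKeeper; a filled-in sketch (hostnames and port are
hypothetical):

  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://nn1.example.com:8020/apps/hbase/data</value>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>zk1.example.com,zk2.example.com,zk3.example.com</value>
  </property>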

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/metainfo.xml
deleted file mode 100644
index 1bb3d14..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.6.1.3.0.0</version>
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HCATALOG/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HCATALOG/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HCATALOG/configuration/global.xml
deleted file mode 100644
index dd89409..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HCATALOG/configuration/global.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description>WebHCat Log Dir.</description>
-  </property>
-  <property>
-    <name>hcat_pid_dir</name>
-    <value>/etc/run/webhcat</value>
-    <description>WebHCat Pid Dir.</description>
-  </property>
-  <property>
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <description>HCat User.</description>
-  </property>
-  <property>
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <description>WebHCat User.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HCATALOG/metainfo.xml
deleted file mode 100644
index dd5ff08..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Table and storage management service for Hadoop</comment>
-    <version>0.6.0.1.3.0.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index fe0bcba..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,253 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directories then the edits are
-        replicated in all of the directories for redundancy.
-        Default value is same as fs.checkpoint.dir
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>536870912</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to the public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value></value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-<description>The mapping from kerberos principal names to local OS user names.
-  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
-  </property>
-
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
-</configuration>
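
The auth_to_local rule language documented above is easiest to follow with a
filled-in value. A hedged sketch for a hypothetical EXAMPLE.COM realm that
maps the two-component nn/_HOST and dn/_HOST principals to the hdfs user and
then falls back to the default first-component rule:

  <property>
    <name>hadoop.security.auth_to_local</name>
    <value>
      RULE:[2:$1@$0](nn@EXAMPLE.COM)s/.*/hdfs/
      RULE:[2:$1@$0](dn@EXAMPLE.COM)s/.*/hdfs/
      DEFAULT
    </value>
  </property>

Here [2:$1@$0] rewrites nn/host1.example.com@EXAMPLE.COM to the string
nn@EXAMPLE.COM, the parenthesized filter matches it, and the sed substitution
replaces the whole string with hdfs.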

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/global.xml
deleted file mode 100644
index f10b9f9..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/global.xml
+++ /dev/null
@@ -1,187 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>namenode_host</name>
-    <value></value>
-    <description>NameNode Host.</description>
-  </property>
-  <property>
-    <name>dfs_name_dir</name>
-    <value>/hadoop/hdfs/namenode</value>
-    <description>NameNode Directories.</description>
-  </property>
-  <property>
-    <name>snamenode_host</name>
-    <value></value>
-    <description>Secondary NameNode Host.</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Secondary NameNode checkpoint dir.</description>
-  </property>
-  <property>
-    <name>datanode_hosts</name>
-    <value></value>
-    <description>List of Datanode Hosts.</description>
-  </property>
-  <property>
-    <name>dfs_data_dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Data directories for Data Nodes.</description>
-  </property>
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>dfs_webhdfs_enabled</name>
-    <value>true</value>
-    <description>WebHDFS enabled</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>640</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>datanode_du_reserved</name>
-    <value>1</value>
-    <description>Reserved space for HDFS</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>dfs_datanode_failed_volume_tolerated</name>
-    <value>0</value>
-    <description>DataNode volumes failure toleration</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_period</name>
-    <value>21600</value>
-    <description>HDFS Maximum Checkpoint Delay</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_size</name>
-    <value>0.5</value>
-    <description>FS Checkpoint Size.</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-  <property>
-    <name>dfs_exclude</name>
-    <value></value>
-    <description>HDFS Exclude hosts.</description>
-  </property>
-  <property>
-    <name>dfs_include</name>
-    <value></value>
-    <description>HDFS Include hosts.</description>
-  </property>
-  <property>
-    <name>dfs_replication</name>
-    <value>3</value>
-    <description>Default Block Replication.</description>
-  </property>
-  <property>
-    <name>dfs_block_local_path_access_user</name>
-    <value>hbase</value>
-    <description>User allowed to perform short-circuit reads.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_address</name>
-    <value>50010</value>
-    <description>Port for datanode address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_http_address</name>
-    <value>50075</value>
-    <description>Port for datanode HTTP address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir_perm</name>
-    <value>750</value>
-    <description>Datanode dir perms.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-  <property>
-    <name>kadmin_pw</name>
-    <value></value>
-    <description>Kerberos realm admin password</description>
-  </property>
-  <property>
-    <name>keytab_path</name>
-    <value>/etc/security/keytabs</value>
-    <description>Kerberos keytab path.</description>
-  </property>
-
-  <property>
-    <name>namenode_formatted_mark_dir</name>
-    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
-    <description>Formatted Mark Directory.</description>
-  </property>
-  <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>HDFS User.</description>
-  </property>
-  
-</configuration>
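
Globals such as dfs_name_dir and fs_checkpoint_dir are the UI-facing
counterparts of dfs.name.dir and fs.checkpoint.dir: in this stack generation
the host-side templating substitutes each global into the site property of
the matching name. A sketch of expressing a multi-disk NameNode layout once
here (paths hypothetical) so it surfaces in hdfs-site.xml as dfs.name.dir:

  <property>
    <name>dfs_name_dir</name>
    <value>/grid/0/hadoop/hdfs/namenode,/grid/1/hadoop/hdfs/namenode</value>
    <description>Comma-separated NameNode directories, replicated for redundancy.</description>
  </property>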

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value></value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>
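
Note that the last three ACLs above ship empty rather than "*", since Ambari
fills them per cluster when service-level authorization is enabled. A hedged
sketch of a restrictive value for the policy-refresh protocol (the hdfs user
and the supergroup group are hypothetical):

  <property>
    <name>security.refresh.policy.protocol.acl</name>
    <value>hdfs supergroup</value>
    <description>Only the hdfs user and the supergroup group may refresh policy.</description>
  </property>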

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index bd978a9..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,454 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>Whether to enable HDFS append.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value></value>
-    <description>Whether to enable WebHDFS.</description>
-    <final>true</final>
-  </property>
-
- <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value></value>
-    <description>Number of failed disks a DataNode will tolerate.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value></value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem an DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value></value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing purposes, in terms of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50070</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value></value>
-    <description>The address and the base port where the dfs namenode
-      web ui will listen on.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.pct</name>
-    <value>0.85f</value>
-    <description>When calculating remaining space, only use this percentage of the real available space
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.xcievers</name>
-    <value>4096</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>dfs.umaskmode</name>
-    <value>077</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.ugi</name>
-    <!-- cluster variant -->
-    <value>gopher,gopher</value>
-    <description>The user account used by the web interface.
-      Syntax: USERNAME,GROUP1,GROUP2, ...
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.supergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow Queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>ipc.server.max.response.size</name>
-    <value>5242880</value>
-  </property>
-
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value></value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>The https port where namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value></value>
-    <description>The https address where namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value></value>
-    <description>The permissions that should be there on dfs.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.access.time.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL specifying who can view the default servlets in HDFS</description>
-  </property>
-
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description>The number of reader threads for the IPC server.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a datanode would tolerate</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes is greater
-      than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>A datanode is considered stale if the namenode has not received a heartbeat from it within this interval, in milliseconds.</description>
-  </property>
-
-</configuration>
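
[Editor's note on the file above] Hadoop daemons and clients consume hdfs-site.xml through org.apache.hadoop.conf.Configuration. A minimal sketch of probing the defaults shown above, assuming the file sits at an illustrative local path (this is not Ambari's own loading code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class HdfsSiteProbe {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false); // skip built-in defaults
            conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml")); // illustrative path
            // Keys and fallback values mirror the deleted file above.
            long blockSize = conf.getLong("dfs.block.size", 134217728L);
            int handlers = conf.getInt("dfs.namenode.handler.count", 40);
            boolean perms = conf.getBoolean("dfs.permissions", true);
            System.out.printf("block=%d handlers=%d perms=%b%n", blockSize, handlers, perms);
        }
    }

Note that dfs.namenode.handler.count appears twice in the file (40, then 100); Configuration keeps the last definition it parses, so the probe above would report 100 here.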

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/metainfo.xml
deleted file mode 100644
index c29bb61..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.2.0.1.3.0.0</version>
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
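
[Editor's note on the file above] The metainfo.xml is just a component manifest: each <component> pairs a name with a MASTER/SLAVE/CLIENT category. A hedged sketch of reading such a file with the JDK's DOM API (Ambari's actual stack loader is not shown here; the file path is illustrative):

    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    public class MetainfoDump {
        public static void main(String[] args) throws Exception {
            Document doc = DocumentBuilderFactory.newInstance()
                    .newDocumentBuilder().parse("metainfo.xml"); // illustrative path
            NodeList comps = doc.getElementsByTagName("component");
            for (int i = 0; i < comps.getLength(); i++) {
                Element c = (Element) comps.item(i);
                String name = c.getElementsByTagName("name").item(0).getTextContent();
                String cat = c.getElementsByTagName("category").item(0).getTextContent();
                System.out.println(name + " -> " + cat); // e.g. NAMENODE -> MASTER
            }
        }
    }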


[06/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

Posted by yu...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/global.xml
deleted file mode 100644
index f10b9f9..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/global.xml
+++ /dev/null
@@ -1,187 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>namenode_host</name>
-    <value></value>
-    <description>NameNode Host.</description>
-  </property>
-  <property>
-    <name>dfs_name_dir</name>
-    <value>/hadoop/hdfs/namenode</value>
-    <description>NameNode Directories.</description>
-  </property>
-  <property>
-    <name>snamenode_host</name>
-    <value></value>
-    <description>Secondary NameNode host.</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Secondary NameNode checkpoint dir.</description>
-  </property>
-  <property>
-    <name>datanode_hosts</name>
-    <value></value>
-    <description>List of Datanode Hosts.</description>
-  </property>
-  <property>
-    <name>dfs_data_dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Data directories for Data Nodes.</description>
-  </property>
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>dfs_webhdfs_enabled</name>
-    <value>true</value>
-    <description>WebHDFS enabled</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>640</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>datanode_du_reserved</name>
-    <value>1</value>
-    <description>Reserved space for HDFS</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>dfs_datanode_failed_volume_tolerated</name>
-    <value>0</value>
-    <description>Number of volume failures a DataNode tolerates.</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_period</name>
-    <value>21600</value>
-    <description>HDFS Maximum Checkpoint Delay</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_size</name>
-    <value>0.5</value>
-    <description>FS Checkpoint Size.</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-  <property>
-    <name>dfs_exclude</name>
-    <value></value>
-    <description>HDFS Exclude hosts.</description>
-  </property>
-  <property>
-    <name>dfs_include</name>
-    <value></value>
-    <description>HDFS Include hosts.</description>
-  </property>
-  <property>
-    <name>dfs_replication</name>
-    <value>3</value>
-    <description>Default Block Replication.</description>
-  </property>
-  <property>
-    <name>dfs_block_local_path_access_user</name>
-    <value>hbase</value>
-    <description>User allowed to perform short-circuit reads.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_address</name>
-    <value>50010</value>
-    <description>Port for datanode address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_http_address</name>
-    <value>50075</value>
-    <description>Port for datanode HTTP address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir_perm</name>
-    <value>750</value>
-    <description>Datanode dir perms.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-  <property>
-    <name>kadmin_pw</name>
-    <value></value>
-    <description>Kerberos realm admin password</description>
-  </property>
-  <property>
-    <name>keytab_path</name>
-    <value>/etc/security/keytabs</value>
-    <description>Kerberos keytab path.</description>
-  </property>
-
-  <property>
-    <name>namenode_formatted_mark_dir</name>
-    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
-    <description>Formatted mark directory.</description>
-  </property>
-  <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>HDFS user.</description>
-  </property>
-
-</configuration>
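
[Editor's note on the file above] The global.xml parameters are not Hadoop properties; they are template inputs (heap sizes, directories, users) that get rendered into files such as hadoop-env. A sketch of how the NameNode heap globals would typically surface as JVM flags, assuming the usual -Xmx/NewSize mapping rather than Ambari's actual templating code:

    public class NameNodeOpts {
        // Assumed mapping: namenode_heapsize -> -Xmx, namenode_opt_newsize -> -XX:NewSize,
        // namenode_opt_maxnewsize -> -XX:MaxNewSize (all values in megabytes).
        static String render(int heapMb, int newMb, int maxNewMb) {
            return String.format("-Xmx%dm -XX:NewSize=%dm -XX:MaxNewSize=%dm",
                    heapMb, newMb, maxNewMb);
        }

        public static void main(String[] args) {
            // Defaults from the file above: 1024 / 200 / 640.
            System.out.println(render(1024, 200, 640));
            // -> -Xmx1024m -XX:NewSize=200m -XX:MaxNewSize=640m
        }
    }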

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value></value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>
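
[Editor's note on the file above] Every ACL in hadoop-policy.xml uses the same wire format: a comma-separated user list, a blank, a comma-separated group list, with "*" meaning everyone. A simplified re-implementation of the check, for illustration only (the real logic lives in Hadoop's AccessControlList class, which this sketch does not reproduce):

    import java.util.Arrays;
    import java.util.List;

    public class SimpleAcl {
        private final boolean allowAll;
        private final List<String> users;
        private final List<String> groups;

        SimpleAcl(String acl) {
            allowAll = acl.trim().equals("*");
            // Format per the descriptions above: "user1,user2 group1,group2"
            String[] parts = (acl.trim() + " ").split(" ", 2);
            users = Arrays.asList(parts[0].split(","));
            groups = Arrays.asList(parts[1].trim().split(","));
        }

        boolean permits(String user, List<String> memberOf) {
            if (allowAll || users.contains(user)) return true;
            for (String g : memberOf) if (groups.contains(g)) return true;
            return false;
        }

        public static void main(String[] args) {
            SimpleAcl acl = new SimpleAcl("alice,bob users,wheel");
            System.out.println(acl.permits("alice", List.of()));        // true (user match)
            System.out.println(acl.permits("carol", List.of("wheel"))); // true (group match)
            System.out.println(acl.permits("carol", List.of("staff"))); // false
        }
    }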

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 15666e0..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,454 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a datanode would tolerate</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-      circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for balancing purposes, in terms of
-      the number of bytes per second.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50070</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value>localhost:50070</value>
-    <description>The address and the base port where the dfs namenode
-      web ui will listen on.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.pct</name>
-    <value>0.85f</value>
-    <description>When calculating remaining space, only use this percentage of the real available space
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.xcievers</name>
-    <value>4096</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>dfs.umaskmode</name>
-    <value>077</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.ugi</name>
-    <!-- cluster variant -->
-    <value>gopher,gopher</value>
-    <description>The user account used by the web interface.
-      Syntax: USERNAME,GROUP1,GROUP2, ...
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.supergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow Queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>ipc.server.max.response.size</name>
-    <value>5242880</value>
-  </property>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value>localhost:50470</value>
-    <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be there on dfs.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.access.time.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL specifying who can view the default servlets in HDFS</description>
-  </property>
-
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description>The number of reader threads for the IPC server.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks datanode would tolerate</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes is greater
-      than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>A datanode is considered stale if the namenode has not received a heartbeat from it within this interval, in milliseconds.</description>
-  </property>
-
-</configuration>
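
[Editor's note on the file above] The four stale-datanode settings at the end of this file work together: with dfs.heartbeat.interval=3 and dfs.namenode.stale.datanode.interval=30000, a datanode that misses roughly ten heartbeats (30 s) is marked stale; reads then deprioritize it, and writes keep avoiding it until the stale fraction of the cluster exceeds dfs.namenode.write.stale.datanode.ratio (set to 1.0 here, a threshold the fraction can never exceed, so write avoidance stays on). A sketch restating that policy, paraphrasing the descriptions above rather than HDFS source:

    public class StalePolicy {
        // True while writes should keep avoiding stale datanodes.
        static boolean avoidStaleForWrites(int stale, int total, double ratio) {
            return total > 0 && (double) stale / total <= ratio;
        }

        public static void main(String[] args) {
            long sinceHeartbeatMs = 31_000;
            boolean stale = sinceHeartbeatMs > 30_000;            // stale.datanode.interval
            System.out.println(stale);                            // true: ~10 missed 3s heartbeats
            System.out.println(avoidStaleForWrites(3, 10, 1.0));  // true: keep avoiding
            System.out.println(avoidStaleForWrites(10, 10, 0.5)); // false: too many stale
        }
    }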

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/metainfo.xml
deleted file mode 100644
index e33be3c..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.2.0.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/global.xml
deleted file mode 100644
index d9adc80..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/global.xml
+++ /dev/null
@@ -1,125 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hivemetastore_host</name>
-    <value></value>
-    <description>Hive Metastore host.</description>
-  </property>
-  <property>
-    <name>hive_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_host</name>
-    <value></value>
-    <description>Existing MySQL database host.</description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_host</name>
-    <value></value>
-    <description>Existing Oracle database host.</description>
-  </property>
-  <property>
-    <name>hive_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database type.</description>
-  </property>  
-  <property>
-    <name>hive_ambari_host</name>
-    <value></value>
-    <description>Database hostname.</description>
-  </property>
-  <property>
-    <name>hive_database_name</name>
-    <value></value>
-    <description>Database name.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_user_name</name>
-    <value>hive</value>
-    <description>Database username to use to connect to the database.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_user_passwd</name>
-    <value></value>
-    <description>Database password to use to connect to the database.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_port</name>
-    <value>9083</value>
-    <description>Hive Metastore port.</description>
-  </property>    
-  <property>
-    <name>hive_lib</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive Library.</description>
-  </property>    
-  <property>
-    <name>hive_dbroot</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive DB Directory.</description>
-  </property>      
-  <property>
-    <name>hive_conf_dir</name>
-    <value>/etc/hive/conf</value>
-    <description>Hive Conf Dir.</description>
-  </property>
-  <property>
-    <name>hive_log_dir</name>
-    <value>/var/log/hive</value>
-    <description>Directory for Hive Log files.</description>
-  </property>
-  <property>
-    <name>hive_pid_dir</name>
-    <value>/var/run/hive</value>
-    <description>Hive PID Dir.</description>
-  </property>
-  <property>
-    <name>mysql_connector_url</name>
-    <value>${download_url}/mysql-connector-java-5.1.18.zip</value>
-    <description>URL for downloading the MySQL JDBC connector.</description>
-  </property>
-  <property>
-    <name>hive_aux_jars_path</name>
-    <value>/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar</value>
-    <description>Hive auxiliary jar path.</description>
-  </property>
-  <property>
-    <name>hive_user</name>
-    <value>hive</value>
-    <description>Hive User.</description>
-  </property>
-  
-</configuration>
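
[Editor's note on the file above] These Hive globals feed the metastore's JDBC setup in hive-site.xml. A sketch of how hive_ambari_host and hive_database_name would typically combine into javax.jdo.option.ConnectionURL for the MySQL case; this illustrates the URL shape, not Ambari's actual assembly code, and the host and database below are made up:

    public class MetastoreUrl {
        static String mysqlUrl(String host, String database) {
            // createDatabaseIfNotExist is a MySQL Connector/J option commonly
            // used so the metastore schema is created on first start.
            return "jdbc:mysql://" + host + "/" + database
                    + "?createDatabaseIfNotExist=true";
        }

        public static void main(String[] args) {
            System.out.println(mysqlUrl("db.example.com", "hive"));
            // -> jdbc:mysql://db.example.com/hive?createDatabaseIfNotExist=true
        }
    }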

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 1337fa4..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,230 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc</value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value> </value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-      Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-      thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-      string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>controls which SemanticAnalyzerFactory implementation class is used by CLI</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort. If the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-      The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization of converting a common
-      join into a mapjoin based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
-      the criteria for sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file
-      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than the
-      specified size, the join is directly converted to a mapjoin (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
-      converted to a mapjoin (there is no conditional task). The default is 10MB.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
-    <description>Reduce deduplication merges two RSs (reduce sinks) by moving the key/parts/reducer-num of the child RS to the parent RS.
-      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can produce a very slow, single-reduce MR job.
-      The optimization will be disabled if the number of reducers is less than the specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and there are map-join jobs followed by a map-reduce
-      job (e.g. a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      How many values in each key of the map-joined table should be cached in memory.
-    </description>
-  </property>
-
-</configuration>
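
[Editor's note on the file above] With a metastore reachable at hive.metastore.uris and a HiveServer2 in front of it, clients talk SQL over JDBC. A minimal sketch against the Hive 0.11-era HiveServer2 driver (requires hive-jdbc on the classpath); the host, port, and credentials below are illustrative, not values from this stack:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class HiveQuery {
        public static void main(String[] args) throws Exception {
            Class.forName("org.apache.hive.jdbc.HiveDriver"); // HiveServer2 driver
            try (Connection c = DriverManager.getConnection(
                         "jdbc:hive2://localhost:10000/default", "hive", "");
                 Statement s = c.createStatement();
                 ResultSet rs = s.executeQuery("SHOW TABLES")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }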

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/metainfo.xml
deleted file mode 100644
index 4ee02e6..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.11.0.1.3.3.0</version>
-
-    <components>        
-        <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HUE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HUE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HUE/configuration/global.xml
deleted file mode 100644
index c49480f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HUE/configuration/global.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hue_pid_dir</name>
-    <value>/var/run/hue</value>
-    <description>Hue Pid Dir.</description>
-  </property>
-  <property>
-    <name>hue_log_dir</name>
-    <value>/var/log/hue</value>
-    <description>Hue Log Dir.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HUE/configuration/hue-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HUE/configuration/hue-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HUE/configuration/hue-site.xml
deleted file mode 100644
index 6eb52a2..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HUE/configuration/hue-site.xml
+++ /dev/null
@@ -1,290 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-  <!-- General Hue server configuration properties -->
-  <property>
-      <name>send_debug_messages</name>
-      <value>1</value>
-      <description></description>
-  </property>
-
-  <property>
-    <name>database_logging</name>
-    <value>0</value>
-    <description>To show database transactions, set database_logging to 1.
-      By default, database_logging=0.</description>
-  </property>
-
-  <property>
-    <name>secret_key</name>
-    <value></value>
-    <description>This is used for secure hashing in the session store.</description>
-  </property>
-
-  <property>
-    <name>http_host</name>
-    <value>0.0.0.0</value>
-    <description>Webserver listens on this address and port</description>
-  </property>
-
-  <property>
-    <name>http_port</name>
-    <value>8000</value>
-    <description>Webserver listens on this address and port</description>
-  </property>
-
-  <property>
-    <name>time_zone</name>
-    <value>America/Los_Angeles</value>
-    <description>Time zone name</description>
-  </property>
-
-  <property>
-    <name>django_debug_mode</name>
-    <value>1</value>
-    <description>Turn off debug</description>
-  </property>
-
-  <property>
-    <name>use_cherrypy_server</name>
-    <value>false</value>
-    <description>Set to true to use CherryPy as the webserver, set to false
-      to use Spawning as the webserver. Defaults to Spawning if
-      key is not specified.</description>
-  </property>
-
-  <property>
-    <name>http_500_debug_mode</name>
-    <value>1</value>
-    <description>Turn off backtrace for server error</description>
-  </property>
-
-  <property>
-    <name>server_user</name>
-    <value></value>
-    <description>Webserver runs as this user</description>
-  </property>
-
-  <property>
-    <name>server_group</name>
-    <value></value>
-    <description>Webserver runs as this group</description>
-  </property>
-
-  <property>
-    <name>backend_auth_policy</name>
-    <value>desktop.auth.backend.AllowAllBackend</value>
-    <description>Authentication backend.</description>
-  </property>
-
-  <!-- Hue Database configuration properties -->
-  <property>
-    <name>db_engine</name>
-    <value>mysql</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_host</name>
-    <value>localhost</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_port</name>
-    <value>3306</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_user</name>
-    <value>sandbox</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_password</name>
-    <value>1111</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_name</name>
-    <value>sandbox</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <!-- Hue Email configuration properties -->
-  <property>
-    <name>smtp_host</name>
-    <value>localhost</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_port</name>
-    <value>25</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_user</name>
-    <value></value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_password</name>
-    <value>25</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>tls</name>
-    <value>no</value>
-    <description>Whether to use a TLS (secure) connection when talking to the SMTP server.</description>
-  </property>
-
-  <property>
-    <name>default_from_email</name>
-    <value>sandbox@hortonworks.com</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <!-- Hue Hadoop configuration properties -->
-  <property>
-    <name>fs_defaultfs</name>
-    <value></value>
-    <description>Enter the filesystem uri,
-      e.g. hdfs://sandbox:8020</description>
-  </property>
-
-  <property>
-    <name>webhdfs_url</name>
-    <value></value>
-    <description>Use WebHdfs/HttpFs as the communication mechanism. To fall back to
-      using the Thrift plugin (used in Hue 1.x), this must be uncommented
-      and explicitly set to the empty value.
-      Value e.g.: http://localhost:50070/webhdfs/v1/</description>
-  </property>
-
-  <property>
-    <name>jobtracker_host</name>
-    <value></value>
-    <description>Enter the host on which you are running the Hadoop JobTracker.</description>
-  </property>
-
-  <property>
-    <name>jobtracker_port</name>
-    <value>50030</value>
-    <description>The port on which the JobTracker IPC listens.</description>
-  </property>
-
-  <property>
-    <name>hadoop_mapred_home</name>
-    <value>/usr/lib/hadoop/lib</value>
-    <description>The Hadoop MapReduce home directory.</description>
-  </property>
-
-  <property>
-    <name>resourcemanager_host</name>
-    <value></value>
-    <description>Enter the host on which you are running the ResourceManager.</description>
-  </property>
-
-  <property>
-    <name>resourcemanager_port</name>
-    <value></value>
-    <description>The port on which the ResourceManager IPC listens.</description>
-  </property>
-
-  <!-- Hue Beeswax configuration properties -->
-  <property>
-    <name>hive_home_dir</name>
-    <value></value>
-    <description>Hive home directory.</description>
-  </property>
-
-  <property>
-    <name>hive_conf_dir</name>
-    <value></value>
-    <description>Hive configuration directory, where hive-site.xml is
-      located.</description>
-  </property>
-
-  <property>
-    <name>templeton_url</name>
-    <value></value>
-    <description>WebHcat http URL</description>
-  </property>
-
-  <!-- Hue shell types configuration -->
-  <property>
-    <name>pig_nice_name</name>
-    <value></value>
-    <description>Define and configure a new shell type pig</description>
-  </property>
-
-  <property>
-    <name>pig_shell_command</name>
-    <value>/usr/bin/pig -l /dev/null</value>
-    <description>Define and configure a new shell type pig.</description>
-  </property>
-
-  <property>
-    <name>pig_java_home</name>
-    <value></value>
-    <description>Define and configure a new shell type pig.</description>
-  </property>
-
-  <property>
-    <name>hbase_nice_name</name>
-    <value>HBase Shell</value>
-    <description>Define and configure a new shell type hbase</description>
-  </property>
-
-  <property>
-    <name>hbase_shell_command</name>
-    <value>/usr/bin/hbase shell</value>
-    <description>Define and configure a new shell type hbase.</description>
-  </property>
-
-  <property>
-    <name>bash_nice_name</name>
-    <value></value>
-    <description>Define and configure a new shell type bash for testing
-      only</description>
-  </property>
-
-  <property>
-    <name>bash_shell_command</name>
-    <value>/bin/bash</value>
-    <description>Define and configure a new shell type bash for testing
-      only.</description>
-  </property>
-
-  <!-- Hue Settings for the User Admin application -->
-  <property>
-    <name>whitelist</name>
-    <value>(localhost|127\.0\.0\.1):(50030|50070|50060|50075|50111)</value>
-    <description>proxy settings</description>
-  </property>
-
-</configuration>
\ No newline at end of file
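
As a point of reference, the two Hadoop-connection properties in the hunk
above ship empty and normally need cluster-specific values before Hue can
reach HDFS; a hedged sketch using the example endpoints from the property
descriptions themselves:

  <property>
    <name>fs_defaultfs</name>
    <value>hdfs://sandbox:8020</value>
    <!-- illustrative host, taken from the description above -->
  </property>
  <property>
    <name>webhdfs_url</name>
    <value>http://localhost:50070/webhdfs/v1/</value>
    <!-- illustrative endpoint, taken from the description above -->
  </property>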

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HUE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HUE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HUE/metainfo.xml
deleted file mode 100644
index ba580ca..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HUE/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Hue is a graphical user interface to operate and develop
-      applications for Apache Hadoop.</comment>
-    <version>2.2.0.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>HUE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/capacity-scheduler.xml
deleted file mode 100644
index 8034d19..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,195 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention,such as, -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
-    <value>3000</value>
-    <description>Maximum number of jobs in the system which can be initialized,
-     concurrently, by the CapacityScheduler.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
-    <value>-1</value>
-    <description>
-	maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
-	This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
-	The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
-        Default value of -1 implies a queue can use complete capacity of the cluster.
-
-        This property can be used to curtail certain long-running jobs from
-        occupying more than a certain percentage of the cluster, which, in the
-        absence of pre-emption, could lead to the capacity guarantees of other
-        queues being affected.
-
-        One important thing to note is that maximum-capacity is a percentage, so
-        based on the cluster's capacity the max capacity would change. So if a large
-        number of nodes or racks get added to the cluster, max capacity in absolute
-        terms would increase accordingly.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources 
-    allocated to a user at any given time, if there is competition for them. 
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this 
-    property is 25. If two users have submitted jobs to a queue, no single 
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4 
-    or more users, no user can use more than 25% of the queue's resources. A 
-    value of 100 implies no user limits are imposed. 
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
-    <value>1</value>
-    <description>The multiple of the queue capacity which can be configured to 
-    allow a single user to acquire more slots. 
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
-    <value>200000</value>
-    <description>The maximum number of tasks, across all jobs in the queue, 
-    which can be initialized concurrently. Once the queue's jobs exceed this 
-    limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The maximum number of tasks per-user, across all of the 
-    user's jobs in the queue, which can be initialized concurrently. Once the 
-    user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The multiple of (maximum-system-jobs * queue-capacity) used to 
-    determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The percentage of the resources limited to a particular user
-      for the job queue at any given point of time by default.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
-    <value>1</value>
-    <description>The default multiple of queue-capacity which is used to 
-    determine the amount of slots a single user can consume concurrently.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
-    <value>200000</value>
-    <description>The default maximum number of tasks, across all jobs in the 
-    queue, which can be initialized concurrently. Once the queue's jobs exceed 
-    this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The default maximum number of tasks per-user, across all of 
-    the user's jobs in the queue, which can be initialized concurrently. Once 
-    the user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The default multiple of (maximum-system-jobs * queue-capacity) 
-    used to determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in milliseconds which is used to poll 
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads used by the initialization
-    poller to initialize jobs in a set of queues. If the number mentioned
-    in the property is equal to the number of job queues, then a single
-    thread initializes the jobs in one queue. If it is smaller, then each
-    thread is assigned a set of queues. If it is greater, then the number
-    of threads is capped at the number of job queues.
-    </description>
-  </property>
-
-</configuration>
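
The per-queue properties above all target the built-in default queue; adding
a queue means repeating them under the new queue's name, and declaring the
queue in mapred.queue.names in mapred-site.xml. A hedged sketch for a
hypothetical queue named "research":

  <property>
    <!-- hypothetical queue; capacities across all queues should sum to 100 -->
    <name>mapred.capacity-scheduler.queue.research.capacity</name>
    <value>30</value>
  </property>
  <property>
    <name>mapred.capacity-scheduler.queue.research.supports-priority</name>
    <value>false</value>
  </property>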

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/global.xml
deleted file mode 100644
index 2fc33c9..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>jobtracker_host</name>
-    <value></value>
-    <description>JobTracker Host.</description>
-  </property>
-  <property>
-    <name>tasktracker_hosts</name>
-    <value></value>
-    <description>TaskTracker hosts.</description>
-  </property>
-  <property>
-    <name>mapred_local_dir</name>
-    <value>/hadoop/mapred</value>
-    <description>MapRed Local Directories.</description>
-  </property>
-  <property>
-    <name>mapred_system_dir</name>
-    <value>/mapred/system</value>
-    <description>MapRed System Directories.</description>
-  </property>
-  <property>
-    <name>scheduler_name</name>
-    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
-    <description>MapRed Capacity Scheduler.</description>
-  </property>
-  <property>
-    <name>jtnode_opt_newsize</name>
-    <value>200</value>
-    <description>Mem New Size.</description>
-  </property>
-  <property>
-    <name>jtnode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>Max New size.</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>jtnode_heapsize</name>
-    <value>1024</value>
-    <description>Maximum Java heap size for JobTracker in MB (Java option -Xmx)</description>
-  </property>
-  <property>
-    <name>mapred_map_tasks_max</name>
-    <value>4</value>
-    <description>Number of slots that Map tasks that run simultaneously can occupy on a TaskTracker</description>
-  </property>
-  <property>
-    <name>mapred_red_tasks_max</name>
-    <value>2</value>
-    <description>Number of slots that Reduce tasks that run simultaneously can occupy on a TaskTracker</description>
-  </property>
-  <property>
-    <name>mapred_cluster_map_mem_mb</name>
-    <value>-1</value>
-    <description>The virtual memory size of a single Map slot in the MapReduce framework</description>
-  </property>
-  <property>
-    <name>mapred_cluster_red_mem_mb</name>
-    <value>-1</value>
-    <description>The virtual memory size of a single Reduce slot in the MapReduce framework</description>
-  </property>
-  <property>
-    <name>mapred_job_map_mem_mb</name>
-    <value>-1</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-  <property>
-    <name>mapred_child_java_opts_sz</name>
-    <value>768</value>
-    <description>Java options for the TaskTracker child processes.</description>
-  </property>
-  <property>
-    <name>io_sort_mb</name>
-    <value>200</value>
-    <description>The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration).</description>
-  </property>
-  <property>
-    <name>io_sort_spill_percent</name>
-    <value>0.9</value>
-    <description>Percentage of sort buffer used for record collection (Expert-only configuration).</description>
-  </property>
-  <property>
-    <name>mapreduce_userlog_retainhours</name>
-    <value>24</value>
-    <description>The maximum time, in hours, for which the user-logs are to be retained after the job completion.</description>
-  </property>
-  <property>
-    <name>maxtasks_per_job</name>
-    <value>-1</value>
-    <description>Maximum number of tasks for a single Job</description>
-  </property>
-  <property>
-    <name>lzo_enabled</name>
-    <value>true</value>
-    <description>LZO compression enabled</description>
-  </property>
-  <property>
-    <name>snappy_enabled</name>
-    <value>true</value>
-    <description>Snappy compression enabled</description>
-  </property>
-  <property>
-    <name>rca_enabled</name>
-    <value>true</value>
-    <description>Enable Job Diagnostics.</description>
-  </property>
-  <property>
-    <name>mapred_hosts_exclude</name>
-    <value></value>
-    <description>Exclude entered hosts</description>
-  </property>
-  <property>
-    <name>mapred_hosts_include</name>
-    <value></value>
-    <description>Include entered hosts</description>
-  </property>
-  <property>
-    <name>mapred_jobstatus_dir</name>
-    <value>file:////mapred/jobstatus</value>
-    <description>Job Status directory</description>
-  </property>
-  <property>
-    <name>task_controller</name>
-    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-    <description>Task Controller.</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>MapReduce User.</description>
-  </property>
-
-</configuration>
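
These global entries are UI-facing inputs rather than Hadoop keys; the stack
templates typically substitute them into mapred-site.xml. A hedged sketch of
how the two slot counts above would be expected to surface (the mapping onto
these mapred-site keys is an assumption, not something spelled out in this
hunk):

  <property>
    <name>mapred.tasktracker.map.tasks.maximum</name>
    <value>4</value>
    <!-- assumed to be fed by mapred_map_tasks_max above -->
  </property>
  <property>
    <name>mapred.tasktracker.reduce.tasks.maximum</name>
    <value>2</value>
    <!-- assumed to be fed by mapred_red_tasks_max above -->
  </property>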

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
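
The '*' values above leave the default queue open to everyone. The ACL format
is a comma-separated user list, then a space, then a comma-separated group
list; a hedged sketch with hypothetical principals:

  <property>
    <!-- users alice and bob, plus members of group hadoop-users; all hypothetical -->
    <name>mapred.queue.default.acl-submit-job</name>
    <value>alice,bob hadoop-users</value>
  </property>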


[08/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

Posted by yu...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 15666e0..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,454 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a DataNode would tolerate</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-      circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem an DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximum block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for the balancing purpose in term of
-      the number of bytes per second.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50070</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value>localhost:50070</value>
-    <description>The address and the base port on which the dfs NameNode
-      web UI will listen.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.pct</name>
-    <value>0.85f</value>
-    <description>When calculating remaining space, only use this percentage of the real available space
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.xcievers</name>
-    <value>4096</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>dfs.umaskmode</name>
-    <value>077</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.ugi</name>
-    <!-- cluster variant -->
-    <value>gopher,gopher</value>
-    <description>The user account used by the web interface.
-      Syntax: USERNAME,GROUP1,GROUP2, ...
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.supergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow Queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>ipc.server.max.response.size</name>
-    <value>5242880</value>
-  </property>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value>localhost:50470</value>
-    <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be there on dfs.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.access.time.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL for who can view the default servlets in HDFS</description>
-  </property>
-
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks datanode would tolerate</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes is greater
-      than this ratio, stop avoiding writing to stale nodes to prevent causing hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>A DataNode is marked stale after not sending a heartbeat for this interval, in milliseconds</description>
-  </property>
-
-</configuration>
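
Among the properties above, dfs.hosts.exclude is the one that drives
decommissioning: the file it points to is a plain list of hostnames, and the
NameNode re-reads it on a refresh. A hedged sketch of the workflow (the
dfsadmin step reflects standard Hadoop 1.x tooling rather than anything in
this hunk):

  <property>
    <name>dfs.hosts.exclude</name>
    <value>/etc/hadoop/conf/dfs.exclude</value>
  </property>
  <!-- /etc/hadoop/conf/dfs.exclude holds one hostname per line, e.g.
         dn3.example.com          (hypothetical host)
       then run: hadoop dfsadmin -refreshNodes
       to begin decommissioning the listed DataNodes -->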

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/metainfo.xml
deleted file mode 100644
index 2729261..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.2.0.1.3.2.0</version>
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/global.xml
deleted file mode 100644
index d9adc80..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/global.xml
+++ /dev/null
@@ -1,125 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hivemetastore_host</name>
-    <value></value>
-    <description>Hive Metastore host.</description>
-  </property>
-  <property>
-    <name>hive_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_host</name>
-    <value></value>
-    <description></description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_host</name>
-    <value></value>
-    <description></description>
-  </property>
-  <property>
-    <name>hive_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database type.</description>
-  </property>  
-  <property>
-    <name>hive_ambari_host</name>
-    <value></value>
-    <description>Database hostname.</description>
-  </property>
-  <property>
-    <name>hive_database_name</name>
-    <value></value>
-    <description>Database name</description>
-  </property>    
-  <property>
-    <name>hive_metastore_user_name</name>
-    <value>hive</value>
-    <description>Database username to use to connect to the database.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_user_passwd</name>
-    <value></value>
-    <description>Database password to use to connect to the database.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_port</name>
-    <value>9083</value>
-    <description>Hive Metastore port.</description>
-  </property>    
-  <property>
-    <name>hive_lib</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive Library.</description>
-  </property>    
-  <property>
-    <name>hive_dbroot</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive DB Directory.</description>
-  </property>      
-  <property>
-    <name>hive_conf_dir</name>
-    <value>/etc/hive/conf</value>
-    <description>Hive Conf Dir.</description>
-  </property>
-  <property>
-    <name>hive_log_dir</name>
-    <value>/var/log/hive</value>
-    <description>Directory for Hive Log files.</description>
-  </property>
-  <property>
-    <name>hive_pid_dir</name>
-    <value>/var/run/hive</value>
-    <description>Hive PID Dir.</description>
-  </property>
-  <property>
-    <name>mysql_connector_url</name>
-    <value>${download_url}/mysql-connector-java-5.1.18.zip</value>
-    <description>MySQL connector download URL.</description>
-  </property>
-  <property>
-    <name>hive_aux_jars_path</name>
-    <value>/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar</value>
-    <description>Hive auxiliary jar path.</description>
-  </property>
-  <property>
-    <name>hive_user</name>
-    <value>hive</value>
-    <description>Hive User.</description>
-  </property>
-  
-</configuration>
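
Several of the globals above (hive_existing_mysql_host, hive_database_name,
hive_metastore_user_name) are template inputs that surface as the JDBC
settings visible in the next hunk. A hedged sketch of the substitution for a
MySQL-backed metastore (host and database names are hypothetical, and the
exact URL shape is an assumption):

  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://db-host.example.com/hivedb?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hive</value>
    <!-- from hive_metastore_user_name above -->
  </property>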

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 1337fa4..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,230 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc</value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value> </value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-      Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-      thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-      string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma-separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best-effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-      The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization about converting common
-      join into mapjoin based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
-      the criteria for sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization about converting common join into mapjoin based on the input file
-      size. If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the
-      specified size, the join is directly converted to a mapjoin (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, the join is directly
-      converted to a mapjoin (there is no conditional task). The default is 10MB.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
-    <description>Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to the parent RS.
-      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single MR job.
-      The optimization will be disabled if the number of reducers is less than the specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and if there are map-join jobs followed by a map-reduce
-      job (e.g. a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      How many values per key of the map-joined table should be cached
-      in memory.
-    </description>
-  </property>
-
-</configuration>
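
Note that the javax.jdo.option.ConnectionURL value of "jdbc" above is only a
placeholder that is filled in when the cluster is configured. A sketch of a
filled-in MySQL-backed metastore connection, assuming a hypothetical database
host db.example.com and database name hive, would be:

  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://db.example.com:3306/hive?createDatabaseIfNotExist=true</value>
    <description>JDBC connect string for a JDBC metastore</description>
  </property>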

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/metainfo.xml
deleted file mode 100644
index 676c610..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.11.0.1.3.2.0</version>
-
-    <components>        
-        <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/global.xml
deleted file mode 100644
index c49480f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/global.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hue_pid_dir</name>
-    <value>/var/run/hue</value>
-    <description>Hue Pid Dir.</description>
-  </property>
-  <property>
-    <name>hue_log_dir</name>
-    <value>/var/log/hue</value>
-    <description>Hue Log Dir.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/hue-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/hue-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/hue-site.xml
deleted file mode 100644
index 6eb52a2..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/configuration/hue-site.xml
+++ /dev/null
@@ -1,290 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-  <!-- General Hue server configuration properties -->
-  <property>
-      <name>send_debug_messages</name>
-      <value>1</value>
-      <description></description>
-  </property>
-
-  <property>
-    <name>database_logging</name>
-    <value>0</value>
-    <description>To show database transactions, set database_logging to 1.
-      By default, database_logging=0.</description>
-  </property>
-
-  <property>
-    <name>secret_key</name>
-    <value></value>
-    <description>This is used for secure hashing in the session store.</description>
-  </property>
-
-  <property>
-    <name>http_host</name>
-    <value>0.0.0.0</value>
-    <description>Webserver listens on this address and port</description>
-  </property>
-
-  <property>
-    <name>http_port</name>
-    <value>8000</value>
-    <description>Webserver listens on this address and port</description>
-  </property>
-
-  <property>
-    <name>time_zone</name>
-    <value>America/Los_Angeles</value>
-    <description>Time zone name</description>
-  </property>
-
-  <property>
-    <name>django_debug_mode</name>
-    <value>1</value>
-    <description>Set to 0 to turn off Django debug mode.</description>
-  </property>
-
-  <property>
-    <name>use_cherrypy_server</name>
-    <value>false</value>
-    <description>Set to true to use CherryPy as the webserver, set to false
-      to use Spawning as the webserver. Defaults to Spawning if
-      key is not specified.</description>
-  </property>
-
-  <property>
-    <name>http_500_debug_mode</name>
-    <value>1</value>
-    <description>Set to 0 to turn off the backtrace shown on server errors.</description>
-  </property>
-
-  <property>
-    <name>server_user</name>
-    <value></value>
-    <description>Webserver runs as this user</description>
-  </property>
-
-  <property>
-    <name>server_group</name>
-    <value></value>
-    <description>Webserver runs as this group</description>
-  </property>
-
-  <property>
-    <name>backend_auth_policy</name>
-    <value>desktop.auth.backend.AllowAllBackend</value>
-    <description>Authentication backend.</description>
-  </property>
-
-  <!-- Hue Database configuration properties -->
-  <property>
-    <name>db_engine</name>
-    <value>mysql</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_host</name>
-    <value>localhost</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_port</name>
-    <value>3306</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_user</name>
-    <value>sandbox</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_password</name>
-    <value>1111</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_name</name>
-    <value>sandbox</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <!-- Hue Email configuration properties -->
-  <property>
-    <name>smtp_host</name>
-    <value>localhost</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_port</name>
-    <value>25</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_user</name>
-    <value></value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_password</name>
-    <value>25</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>tls</name>
-    <value>no</value>
-    <description>Whether to use a TLS (secure) connection when talking to the SMTP server.</description>
-  </property>
-
-  <property>
-    <name>default_from_email</name>
-    <value>sandbox@hortonworks.com</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <!-- Hue Hadoop configuration properties -->
-  <property>
-    <name>fs_defaultfs</name>
-    <value></value>
-    <description>Enter the filesystem URI,
-      e.g. hdfs://sandbox:8020</description>
-  </property>
-
-  <property>
-    <name>webhdfs_url</name>
-    <value></value>
-    <description>Use WebHdfs/HttpFs as the communication mechanism. To fall back to
-      using the Thrift plugin (used in Hue 1.x), this must be uncommented
-      and explicitly set to the empty value.
-      Value e.g.: http://localhost:50070/webhdfs/v1/</description>
-  </property>
-
-  <property>
-    <name>jobtracker_host</name>
-    <value></value>
-    <description>Enter the host on which you are running the Hadoop JobTracker.</description>
-  </property>
-
-  <property>
-    <name>jobtracker_port</name>
-    <value>50030</value>
-    <description>The port on which the JobTracker IPC listens.</description>
-  </property>
-
-  <property>
-    <name>hadoop_mapred_home</name>
-    <value>/usr/lib/hadoop/lib</value>
-    <description>Home directory of the Hadoop MapReduce libraries.</description>
-  </property>
-
-  <property>
-    <name>resourcemanager_host</name>
-    <value></value>
-    <description>Enter the host on which you are running the ResourceManager.</description>
-  </property>
-
-  <property>
-    <name>resourcemanager_port</name>
-    <value></value>
-    <description>The port on which the ResourceManager IPC listens.</description>
-  </property>
-
-  <!-- Hue Beeswax configuration properties -->
-  <property>
-    <name>hive_home_dir</name>
-    <value></value>
-    <description>Hive home directory.</description>
-  </property>
-
-  <property>
-    <name>hive_conf_dir</name>
-    <value></value>
-    <description>Hive configuration directory, where hive-site.xml is
-      located.</description>
-  </property>
-
-  <property>
-    <name>templeton_url</name>
-    <value></value>
-    <description>WebHcat http URL</description>
-  </property>
-
-  <!-- Hue shell types configuration -->
-  <property>
-    <name>pig_nice_name</name>
-    <value></value>
-    <description>Define and configure a new shell type pig</description>
-  </property>
-
-  <property>
-    <name>pig_shell_command</name>
-    <value>/usr/bin/pig -l /dev/null</value>
-    <description>Define and configure a new shell type pig.</description>
-  </property>
-
-  <property>
-    <name>pig_java_home</name>
-    <value></value>
-    <description>Define and configure a new shell type pig.</description>
-  </property>
-
-  <property>
-    <name>hbase_nice_name</name>
-    <value>HBase Shell</value>
-    <description>Define and configure a new shell type hbase</description>
-  </property>
-
-  <property>
-    <name>hbase_shell_command</name>
-    <value>/usr/bin/hbase shell</value>
-    <description>Define and configure a new shell type hbase.</description>
-  </property>
-
-  <property>
-    <name>bash_nice_name</name>
-    <value></value>
-    <description>Define and configure a new shell type bash for testing
-      only</description>
-  </property>
-
-  <property>
-    <name>bash_shell_command</name>
-    <value>/bin/bash</value>
-    <description>Define and configure a new shell type bash for
-      testing only.</description>
-  </property>
-
-  <!-- Hue Settings for the User Admin application -->
-  <property>
-    <name>whitelist</name>
-    <value>(localhost|127\.0\.0\.1):(50030|50070|50060|50075|50111)</value>
-    <description>proxy settings</description>
-  </property>
-
-</configuration>
\ No newline at end of file
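
The whitelist regex above is what limits which host:port pairs the Hue proxy
will reach: it accepts, for example, localhost:50070 or 127.0.0.1:50030, and
rejects everything else. A sketch of extending it to also allow a hypothetical
host namenode.example.com on the same ports would be:

  <property>
    <name>whitelist</name>
    <value>(localhost|127\.0\.0\.1|namenode\.example\.com):(50030|50070|50060|50075|50111)</value>
    <description>proxy settings</description>
  </property>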

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/metainfo.xml
deleted file mode 100644
index 56654df..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HUE/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Hue is a graphical user interface to operate and develop
-      applications for Apache Hadoop.</comment>
-    <version>2.2.0.1.3.2.0</version>
-
-    <components>
-        <component>
-            <name>HUE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/capacity-scheduler.xml
deleted file mode 100644
index 8034d19..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,195 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention, such as, -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
-    <value>3000</value>
-    <description>Maximum number of jobs in the system which can be initialized,
-     concurrently, by the CapacityScheduler.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
-    <value>-1</value>
-    <description>
-	maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
-	This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
-	The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
-        Default value of -1 implies a queue can use complete capacity of the cluster.
-
-        This property could be used to curtail certain jobs which are long running in nature from occupying more than a 
-        certain percentage of the cluster, which in the absence of pre-emption, could lead to capacity guarantees of 
-        other queues being affected.
-        
-        One important thing to note is that maximum-capacity is a percentage, so based on the cluster's capacity
-        the max capacity would change. So if a large number of nodes or racks get added to the cluster, the max capacity in 
-        absolute terms would increase accordingly.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources 
-    allocated to a user at any given time, if there is competition for them. 
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this 
-    property is 25. If two users have submitted jobs to a queue, no single 
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4 
-    or more users, no user can use more than 25% of the queue's resources. A 
-    value of 100 implies no user limits are imposed. 
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
-    <value>1</value>
-    <description>The multiple of the queue capacity which can be configured to 
-    allow a single user to acquire more slots. 
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
-    <value>200000</value>
-    <description>The maximum number of tasks, across all jobs in the queue, 
-    which can be initialized concurrently. Once the queue's jobs exceed this 
-    limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The maximum number of tasks per-user, across all of the 
-    user's jobs in the queue, which can be initialized concurrently. Once the 
-    user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The multiple of (maximum-system-jobs * queue-capacity) used to 
-    determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The percentage of the resources limited to a particular user
-      for the job queue at any given point of time by default.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
-    <value>1</value>
-    <description>The default multiple of queue-capacity which is used to 
-    determine the number of slots a single user can consume concurrently.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
-    <value>200000</value>
-    <description>The default maximum number of tasks, across all jobs in the 
-    queue, which can be initialized concurrently. Once the queue's jobs exceed 
-    this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The default maximum number of tasks per-user, across all of 
-    the user's jobs in the queue, which can be initialized concurrently. Once 
-    the user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The default multiple of (maximum-system-jobs * queue-capacity) 
-    used to determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in milliseconds which is used to poll 
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads used by the initialization
-    poller to initialize jobs in a set of queues. If the number given
-    here equals the number of job queues, each thread initializes the
-    jobs in exactly one queue. If it is smaller, each thread is
-    assigned a set of queues. If it is greater, the number of threads
-    used is capped at the number of job queues.
-    </description>
-  </property>
-
-</configuration>
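
Putting the defaults above together: with maximum-system-jobs=3000, the default
queue at 100% capacity, and init-accept-jobs-factor=10, the scheduler would
accept up to roughly 3000 * 1.0 * 10 = 30000 jobs into the default queue, while
initializing at most 3000 of them concurrently. Similarly,
minimum-user-limit-percent=100 imposes no per-user floor; were it set to 25,
then with four or more users competing, no single user could use more than 25%
of the queue's slots.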

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/global.xml
deleted file mode 100644
index 2fc33c9..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>jobtracker_host</name>
-    <value></value>
-    <description>JobTracker Host.</description>
-  </property>
-  <property>
-    <name>tasktracker_hosts</name>
-    <value></value>
-    <description>TaskTracker hosts.</description>
-  </property>
-  <property>
-    <name>mapred_local_dir</name>
-    <value>/hadoop/mapred</value>
-    <description>MapRed Local Directories.</description>
-  </property>
-  <property>
-    <name>mapred_system_dir</name>
-    <value>/mapred/system</value>
-    <description>MapRed System Directories.</description>
-  </property>
-  <property>
-    <name>scheduler_name</name>
-    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
-    <description>MapRed Capacity Scheduler.</description>
-  </property>
-  <property>
-    <name>jtnode_opt_newsize</name>
-    <value>200</value>
-    <description>Mem New Size.</description>
-  </property>
-  <property>
-    <name>jtnode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>Max New size.</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>jtnode_heapsize</name>
-    <value>1024</value>
-    <description>Maximum Java heap size for JobTracker in MB (Java option -Xmx)</description>
-  </property>
-  <property>
-    <name>mapred_map_tasks_max</name>
-    <value>4</value>
-    <description>Number of slots that simultaneously running Map tasks can occupy on a TaskTracker</description>
-  </property>
-  <property>
-    <name>mapred_red_tasks_max</name>
-    <value>2</value>
-    <description>Number of slots that simultaneously running Reduce tasks can occupy on a TaskTracker</description>
-  </property>
-  <property>
-    <name>mapred_cluster_map_mem_mb</name>
-    <value>-1</value>
-    <description>The virtual memory size of a single Map slot in the MapReduce framework</description>
-  </property>
-  <property>
-    <name>mapred_cluster_red_mem_mb</name>
-    <value>-1</value>
-    <description>The virtual memory size of a single Reduce slot in the MapReduce framework</description>
-  </property>
-  <property>
-    <name>mapred_job_map_mem_mb</name>
-    <value>-1</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-  <property>
-    <name>mapred_child_java_opts_sz</name>
-    <value>768</value>
-    <description>Java options for the TaskTracker child processes.</description>
-  </property>
-  <property>
-    <name>io_sort_mb</name>
-    <value>200</value>
-    <description>The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration).</description>
-  </property>
-  <property>
-    <name>io_sort_spill_percent</name>
-    <value>0.9</value>
-    <description>Percentage of sort buffer used for record collection (Expert-only configuration).</description>
-  </property>
-  <property>
-    <name>mapreduce_userlog_retainhours</name>
-    <value>24</value>
-    <description>The maximum time, in hours, for which the user-logs are to be retained after the job completion.</description>
-  </property>
-  <property>
-    <name>maxtasks_per_job</name>
-    <value>-1</value>
-    <description>Maximum number of tasks for a single Job</description>
-  </property>
-  <property>
-    <name>lzo_enabled</name>
-    <value>true</value>
-    <description>LZO compression enabled</description>
-  </property>
-  <property>
-    <name>snappy_enabled</name>
-    <value>true</value>
-    <description>Snappy compression enabled</description>
-  </property>
-  <property>
-    <name>rca_enabled</name>
-    <value>true</value>
-    <description>Enable Job Diagnostics.</description>
-  </property>
-  <property>
-    <name>mapred_hosts_exclude</name>
-    <value></value>
-    <description>Exclude entered hosts</description>
-  </property>
-  <property>
-    <name>mapred_hosts_include</name>
-    <value></value>
-    <description>Include entered hosts</description>
-  </property>
-  <property>
-    <name>mapred_jobstatus_dir</name>
-    <value>file:////mapred/jobstatus</value>
-    <description>Job Status directory</description>
-  </property>
-  <property>
-    <name>task_controller</name>
-    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-    <description>Task Controller.</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>MapReduce User.</description>
-  </property>
-
-</configuration>
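
These global values appear to be the user-facing knobs from which the
corresponding mapred-site.xml properties (shown later in this patch) are
rendered; the defaults line up one-to-one:

  mapred_map_tasks_max = 4    ->  mapred.tasktracker.map.tasks.maximum = 4
  mapred_red_tasks_max = 2    ->  mapred.tasktracker.reduce.tasks.maximum = 2
  io_sort_mb = 200            ->  io.sort.mb = 200
  scheduler_name              ->  mapred.jobtracker.taskScheduler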

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
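
The two "*" values above leave the default queue open to all users. The Hadoop
queue ACL format is a comma-separated user list, a space, then a comma-separated
group list, so a sketch of restricting submission to a hypothetical etl user
plus the hadoop group would be:

  <property>
    <name>mapred.queue.default.acl-submit-job</name>
    <value>etl hadoop</value>
  </property>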

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index 743ca6a..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,574 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- i/o properties -->
-
-  <property>
-    <name>io.sort.mb</name>
-    <value>200</value>
-    <description>
-      The total amount of Map-side buffer memory to use while sorting files
-    </description>
-  </property>
-
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value>0.9</value>
-    <description>Percentage of sort buffer used for record collection</description>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>No description</description>
-  </property>
-
-  <!-- map/reduce properties -->
-
-  <property>
-    <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-    <value>250</value>
-    <description>Normally, this is the amount of time before killing
-      processes, and the recommended default is 5 seconds - a value of
-      5000 here.  In this case, we are using it solely to blast tasks before
-      killing them, and killing them very quickly (1/4 second) to guarantee
-      that we do not leave VMs around for later jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>50</value>
-    <description>
-      The number of server threads for the JobTracker. This should be roughly
-      4% of the number of tasktracker nodes.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value>/mapred/system</value>
-    <description>Path on HDFS where the MapReduce framework stores system files</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <!-- cluster variant -->
-    <value>localhost:50300</value>
-    <description>JobTracker address</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.http.address</name>
-    <!-- cluster variant -->
-    <value>localhost:50030</value>
-    <description>Http address for JobTracker</description>
-    <final>true</final>
-  </property>
-
-
-  <property>
-    <!-- cluster specific -->
-    <name>mapred.local.dir</name>
-    <value>/hadoop/mapred</value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-  </property>
-
-  <property>
-    <name>mapred.reduce.parallel.copies</name>
-    <value>30</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value>4</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value>2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-  </property>
-
-  <property>
-    <name>mapred.map.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-      may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-      may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.slowstart.completed.maps</name>
-    <value>0.05</value>
-  </property>
-
-  <property>
-    <name>mapred.inmem.merge.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files
-      for the in-memory merge process. When we accumulate threshold number of files
-      we initiate the in-memory merge and spill to disk. A value of 0 or less
-      indicates that no threshold is applied; instead, only the ramfs's memory
-      consumption triggers the merge.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapred.job.shuffle.input.buffer.percent.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.map.output.compression.codec</name>
-    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.output.compression.type</name>
-    <value>BLOCK</value>
-    <description>If the job outputs are to be compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.jobtracker.completeuserjobs.maximum</name>
-    <value>5</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.restart.recover</name>
-    <value>false</value>
-    <description>"true" to enable (job) recovery upon restart,
-      "false" to start afresh
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>The percentage of memory- relative to the maximum heap size- to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.input.limit</name>
-    <value>10737418240</value>
-    <description>The limit on the input size of the reduce. (This value
-      is 10 GB.)  If the estimated input size of the reduce is greater than
-      this value, the job is failed. A value of -1 means that there is no limit
-      set. </description>
-  </property>
-
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
-
-  <property>
-    <name>mapred.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-  </property>
-
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-    <description>
-      TaskController which is used to launch and manage task execution.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value></value>
-
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.map.memory.mb</name>
-    <value>1536</value>
-    <description>
-      The virtual memory size of a single Map slot in the MapReduce framework
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.reduce.memory.mb</name>
-    <value>2048</value>
-    <description>
-      The virtual memory size of a single Reduce slot in the MapReduce framework
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.map.memory.mb</name>
-    <value>1536</value>
-    <description>
-      Virtual memory for single Map task
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.memory.mb</name>
-    <value>2048</value>
-    <description>
-      Virtual memory for single Reduce task
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.map.memory.mb</name>
-    <value>6144</value>
-    <description>
-      Upper limit on virtual memory size for a single Map task of any MapReduce job
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.reduce.memory.mb</name>
-    <value>4096</value>
-    <description>
-      Upper limit on virtual memory size for a single Reduce task of any MapReduce job
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.hosts</name>
-    <value>/etc/hadoop/conf/mapred.include</value>
-    <description>
-      Names a file that contains the list of nodes that may
-      connect to the jobtracker.  If the value is empty, all hosts are
-      permitted.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.hosts.exclude</name>
-    <value>/etc/hadoop/conf/mapred.exclude</value>
-    <description>
-      Names a file that contains the list of hosts that
-      should be excluded by the jobtracker.  If the value is empty, no
-      hosts are excluded.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.max.tracker.blacklists</name>
-    <value>16</value>
-    <description>
-      If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.script.path</name>
-    <value>file:////mapred/jobstatus</value>
-    <description>
-      Directory path to view job status
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.interval</name>
-    <value>135000</value>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.script.timeout</name>
-    <value>60000</value>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.active</name>
-    <value>false</value>
-    <description>Indicates if persistency of job status information is
-      active or not.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.hours</name>
-    <value>1</value>
-    <description>The number of hours job status information is persisted in DFS.
-      The job status information will be available after it drops out of the memory
-      queue and between jobtracker restarts. With a zero value the job status
-      information is not persisted at all in DFS.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.dir</name>
-    <value>/etc/hadoop/conf/health_check</value>
-    <description>The directory where the job status information is persisted
-      in a file system to be available after it drops out of the memory queue and
-      between jobtracker restarts.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.retirejob.check</name>
-    <value>10000</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.retirejob.interval</name>
-    <value>21600000</value>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.history.completed.location</name>
-    <value>/mapred/history/done</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.maxvmem</name>
-    <value></value>
-    <final>true</final>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.maxtasks.per.job</name>
-    <value>-1</value>
-    <final>true</final>
-    <description>The maximum number of tasks for a single job.
-      A value of -1 indicates that there is no maximum.  </description>
-  </property>
-
-  <property>
-    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>mapred.userlog.retain.hours</name>
-    <value>24</value>
-    <description>
-      The maximum time, in hours, for which the user-logs are to be retained after the job completion.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reuse.jvm.num.tasks</name>
-    <value>1</value>
-    <description>
-      How many tasks to run per jvm. If set to -1, there is no limit
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.kerberos.principal</name>
-    <value></value>
-    <description>
-      JT user name key.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.kerberos.principal</name>
-    <value></value>
-    <description>
-      tt user name key. "_HOST" is replaced by the host name of the task tracker.
-    </description>
-  </property>
-
-
-  <property>
-    <name>hadoop.job.history.user.location</name>
-    <value>none</value>
-    <final>true</final>
-  </property>
-
-
-  <property>
-    <name>mapreduce.jobtracker.keytab.file</name>
-    <value></value>
-    <description>
-      The keytab for the jobtracker principal.
-    </description>
-
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.keytab.file</name>
-    <value></value>
-    <description>The filename of the keytab for the task tracker</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.http.address</name>
-    <value></value>
-    <description>Http address for task tracker.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.staging.root.dir</name>
-    <value>/user</value>
-    <description>The path prefix for where the staging directories should be placed. The next level is always the user's
-      name. It is a path in the default file system.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.group</name>
-    <value>hadoop</value>
-    <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
-
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-    <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-      initialization.
-    </description>
-  </property>
-  <property>
-    <name>mapreduce.history.server.embedded</name>
-    <value>false</value>
-    <description>Should the job history server be embedded within the
-      JobTracker process.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.history.server.http.address</name>
-    <!-- cluster variant -->
-    <value>localhost:51111</value>
-    <description>HTTP address of the history server.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.kerberos.principal</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Job history user name key. (must map to same user as JT
-      user)</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The keytab for the job history server principal.</description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-    <value>180</value>
-    <description>
-      3-hour sliding window (value is in minutes)
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-    <value>15</value>
-    <description>
-      15-minute bucket size (value is in minutes)
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.queue.names</name>
-    <value>default</value>
-    <description>Comma-separated list of queues configured for this jobtracker.</description>
-  </property>
-
-</configuration>
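
The jobtracker persistence, retirement, and blacklist keys above are ordinary Hadoop configuration entries. A minimal sketch of reading them with the stock Configuration API (the resource path is an assumption; the defaults mirror the values in the file):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    public class ReadMapredSite {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false);
            // Assumed location of a local copy of the mapred-site.xml shown above.
            conf.addResource(new Path("/tmp/mapred-site.xml"));

            boolean persist = conf.getBoolean("mapred.job.tracker.persist.jobstatus.active", false);
            int persistHours = conf.getInt("mapred.job.tracker.persist.jobstatus.hours", 1);
            long retireMs = conf.getLong("mapred.jobtracker.retirejob.interval", 21600000L);

            // The blacklist window above is 180 minutes of 15-minute buckets,
            // i.e. faults are tracked across 180 / 15 = 12 buckets.
            int buckets = conf.getInt("mapred.jobtracker.blacklist.fault-timeout-window", 180)
                / conf.getInt("mapred.jobtracker.blacklist.fault-bucket-width", 15);

            System.out.printf("persist=%b hours=%d retire=%dms buckets=%d%n",
                persist, persistHours, retireMs, buckets);
        }
    }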

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 757bbb5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.2.0.1.3.2.0</version>
-
-    <components>
-        <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
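
Each metainfo.xml removed by this patch has the same small shape: user, comment, version, and a list of component name/category pairs. A throwaway sketch, JDK only, of listing those pairs (the local file path is an assumption):

    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    public class PrintMetainfo {
        public static void main(String[] args) throws Exception {
            // Assumed local copy of the metainfo.xml shown above.
            Document doc = DocumentBuilderFactory.newInstance()
                .newDocumentBuilder().parse("metainfo.xml");

            NodeList components = doc.getElementsByTagName("component");
            for (int i = 0; i < components.getLength(); i++) {
                Element c = (Element) components.item(i);
                String name = c.getElementsByTagName("name").item(0).getTextContent();
                String category = c.getElementsByTagName("category").item(0).getTextContent();
                System.out.println(name + " -> " + category); // e.g. JOBTRACKER -> MASTER
            }
        }
    }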

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/NAGIOS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/NAGIOS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/NAGIOS/configuration/global.xml
deleted file mode 100644
index 61a2b90..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/NAGIOS/configuration/global.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <description>Nagios Username.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <description>Nagios Group.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Nagios web user.</description>
-  </property>
-  <property>
-    <name>nagios_web_password</name>
-    <value></value>
-    <description>Nagios Admin Password.</description>
-  </property>
-  <property>
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Hadoop Admin Email.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 76471cf..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.5.0</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>


[12/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

Posted by yu...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index acf3e4c..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,251 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directories then the edits are
-        replicated in all of the directories for redundancy.
-        Default value is the same as fs.checkpoint.dir.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>536870912</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to the public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value></value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-<description>The mapping from Kerberos principal names to local OS user names.
-  The default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both become "omalley" if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
-  </property>
-
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
-</configuration>
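
The auth_to_local rules described above can be checked against Hadoop's own resolver rather than by hand. A minimal sketch, assuming hadoop-auth is on the classpath and APACHE.ORG is the local default realm (otherwise the DEFAULT rule will not match):

    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class AuthToLocalDemo {
        public static void main(String[] args) throws Exception {
            // The admin rule from the description above, plus the fallback.
            KerberosName.setRules(
                "RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/\n" +
                "DEFAULT");

            // Two-component admin principal collapses to "admin".
            System.out.println(new KerberosName("omalley/admin@APACHE.ORG").getShortName());
            // One-component principal falls through to DEFAULT -> "omalley"
            // (assuming APACHE.ORG is the default realm).
            System.out.println(new KerberosName("omalley@APACHE.ORG").getShortName());
        }
    }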

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status, etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value></value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>
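
Every ACL value in the file above uses the same grammar: a comma-separated user list, a blank, then a comma-separated group list, with "*" meaning everyone. A small sketch of how that grammar parses, using the Hadoop class such values are normally fed to (shown to illustrate the format, not Ambari code):

    import org.apache.hadoop.security.authorize.AccessControlList;

    public class AclFormatDemo {
        public static void main(String[] args) {
            // "alice,bob users,wheel": users before the blank, groups after it.
            AccessControlList acl = new AccessControlList("alice,bob users,wheel");
            System.out.println(acl.getUsers());   // [alice, bob]
            System.out.println(acl.getGroups());  // [users, wheel]

            // The wildcard used throughout the file: all users allowed.
            AccessControlList open = new AccessControlList("*");
            System.out.println(open.isAllAllowed()); // true
        }
    }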

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index db92d4b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,415 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>Whether to enable DFS append.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value></value>
-    <description>Whether to enable WebHDFS.</description>
-    <final>true</final>
-  </property>
-
- <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value></value>
-    <description>Number of failed disks the datanode will tolerate.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value></value>
-    <description>the user who is allowed to perform short
-    circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value></value>
-    <description>Determines where on the local filesystem an DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value></value>
-    <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value></value>
-    <description>Default block replication.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing purposes, in terms of
-        the number of bytes per second.
-  </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value></value>
-    <description>The address and the base port on which the dfs namenode
-      web UI will listen.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.pct</name>
-    <value>0.85f</value>
-    <description>When calculating remaining space, only use this percentage of the real available space
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.xcievers</name>
-    <value>4096</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-<!-- Permissions configuration -->
-
-  <property>
-    <name>dfs.umaskmode</name>
-    <value>077</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.ugi</name>
-    <!-- cluster variant -->
-    <value>gopher,gopher</value>
-    <description>The user account used by the web interface.
-      Syntax: USERNAME,GROUP1,GROUP2, ...
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.supergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>ipc.server.max.response.size</name>
-    <value>5242880</value>
-  </property>
-
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value></value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>The https port where the namenode binds.</description>
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value></value>
-    <description>The https address where the namenode binds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value></value>
-    <description>The permissions that should be set on dfs.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.access.time.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL for the users and groups who can view the default servlets in HDFS.</description>
-  </property>
-
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks the datanode will tolerate.</description>
-  </property>
-
-</configuration>
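
Two of the hdfs-site values above come down to small arithmetic: dfs.umaskmode=077 strips group and other bits from new files, and dfs.safemode.threshold.pct=1.0f means every minimally replicated block must report before the namenode leaves safe mode. A toy sketch of both calculations (plain Java; the numbers are the file's own values):

    public class HdfsSiteMath {
        public static void main(String[] args) {
            // dfs.umaskmode = 077: a new file's mode is requested & ~umask.
            int requested = 0666, umask = 077;
            System.out.printf("resulting mode: %o%n", requested & ~umask); // 600

            // dfs.safemode.threshold.pct = 1.0f: with 1000 blocks total,
            // 999 reported blocks is still below the threshold.
            float threshold = 1.0f;
            long total = 1000, reported = 999;
            System.out.println(reported >= (long) Math.ceil(threshold * total)); // false
        }
    }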

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/metainfo.xml
deleted file mode 100644
index 1b185e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 7d35558..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,138 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>hive.metastore.local</name>
-    <value>false</value>
-    <description>Controls whether to connect to a remote metastore server or
-    open a new metastore server in the Hive Client JVM.</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value></value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value></value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value></value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>Controls which SemanticAnalyzerFactory implementation class is used by the CLI.</description>
-  </property>
-
-  <property>
-    <name>hadoop.clientside.fs.operations</name>
-    <value>true</value>
-    <description>FS operations are owned by client</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>true</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-</configuration>
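
The javax.jdo.option.* keys above are plain JDBC coordinates for the metastore database, with the driver class named in the file. A hedged sketch of the connection the metastore would end up making (URL, host, and credentials are placeholders; the stack deliberately left them blank to be filled in per cluster):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class MetastoreDbCheck {
        public static void main(String[] args) throws Exception {
            // Same driver class as javax.jdo.option.ConnectionDriverName above.
            Class.forName("com.mysql.jdbc.Driver");

            // Placeholder values for illustration only.
            String url = "jdbc:mysql://db-host:3306/hive?createDatabaseIfNotExist=true";
            try (Connection c = DriverManager.getConnection(url, "hive", "secret")) {
                System.out.println("metastore DB reachable: " + c.isValid(5));
            }
        }
    }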

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/metainfo.xml
deleted file mode 100644
index 6a52064..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.10.0</version>
-
-    <components>        
-        <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/capacity-scheduler.xml
deleted file mode 100644
index 8034d19..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,195 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention,such as, -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
-    <value>3000</value>
-    <description>Maximum number of jobs in the system which can be initialized
-     concurrently by the CapacityScheduler.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
-    <value>-1</value>
-    <description>
-      maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
-      This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
-      The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
-      The default value of -1 implies a queue can use the complete capacity of the cluster.
-
-      This property could be used to curtail certain jobs which are long running in nature from occupying more than a
-      certain percentage of the cluster, which, in the absence of pre-emption, could lead to capacity guarantees of
-      other queues being affected.
-
-      One important thing to note is that maximum-capacity is a percentage, so based on the cluster's capacity
-      the max capacity would change. So if a large number of nodes or racks is added to the cluster, the max capacity in
-      absolute terms would increase accordingly.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources 
-    allocated to a user at any given time, if there is competition for them. 
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this 
-    property is 25. If two users have submitted jobs to a queue, no single 
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4 
-    or more users, no user can use more than 25% of the queue's resources. A 
-    value of 100 implies no user limits are imposed. 
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
-    <value>1</value>
-    <description>The multiple of the queue capacity which can be configured to 
-    allow a single user to acquire more slots. 
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
-    <value>200000</value>
-    <description>The maximum number of tasks, across all jobs in the queue, 
-    which can be initialized concurrently. Once the queue's jobs exceed this 
-    limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The maximum number of tasks per-user, across all of the 
-    user's jobs in the queue, which can be initialized concurrently. Once the 
-    user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The multiple of (maximum-system-jobs * queue-capacity) used to 
-    determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The percentage of the resources limited to a particular user
-      for the job queue at any given point of time by default.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
-    <value>1</value>
-    <description>The default multiple of queue-capacity which is used to 
-    determine the amount of slots a single user can consume concurrently.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
-    <value>200000</value>
-    <description>The default maximum number of tasks, across all jobs in the 
-    queue, which can be initialized concurrently. Once the queue's jobs exceed 
-    this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The default maximum number of tasks per-user, across all of 
-    the user's jobs in the queue, which can be initialized concurrently. Once 
-    the user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The default multiple of (maximum-system-jobs * queue-capacity) 
-    used to determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in milliseconds which is used to poll 
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads used by the
-    initialization poller to initialize jobs in a set of queues.
-    If the number mentioned in the property equals the number of job queues,
-    a single thread initializes the jobs in each queue. If it is lower, each
-    thread is assigned a set of queues. If it is higher, the number of
-    threads used is capped at the number of job queues.
-    </description>
-  </property>
-
-</configuration>
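
The minimum-user-limit-percent description above (value 25: two users cap at 50%, three at 33%, four or more at 25%) reduces to taking an even split across active users but never below the configured floor. A tiny sketch of that rule, as a paraphrase of the documented behavior rather than scheduler source:

    public class UserLimitDemo {
        // Even split across active users, floored at the configured percentage.
        static int userShare(int activeUsers, int minUserLimitPercent) {
            return Math.max(100 / activeUsers, minUserLimitPercent);
        }

        public static void main(String[] args) {
            for (int users = 1; users <= 5; users++) {
                System.out.println(users + " users -> " + userShare(users, 25) + "%");
            }
            // 1 -> 100%, 2 -> 50%, 3 -> 33%, 4 -> 25%, 5 -> 25%
        }
    }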

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
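
For reference, the '*' values above grant every user submit and administer rights on the default queue. A minimal sketch of reading such a file with the Hadoop Configuration API (the file path is illustrative):

// Sketch: reading mapred-queue-acls.xml via the Hadoop client API.
// The file path is illustrative, not one from this repository.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class QueueAclSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration(false); // skip built-in defaults
        conf.addResource(new Path("/etc/hadoop/conf/mapred-queue-acls.xml"));
        // '*' means any user may submit jobs to the default queue
        System.out.println(conf.get("mapred.queue.default.acl-submit-job"));
        System.out.println(conf.get("mapred.queue.default.acl-administer-jobs"));
    }
}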

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index ece056b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,531 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.sort.mb</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>No description</description>
-  </property>
-
-<!-- map/reduce properties -->
-
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes; the recommended default is 5 seconds, i.e. a value of
-  5000 here. In this case, we use it solely to blast tasks before
-  killing them, and we kill them very quickly (1/4 second) to guarantee
-  that we do not leave VMs around for later jobs.
-  </description>
-</property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>50</value>
-    <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <!-- cluster specific -->
-    <name>mapred.local.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
-  </property>
-
-  <property>
-    <name>mapred.reduce.parallel.copies</name>
-    <value>30</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-  </property>
-
-  <property>
-    <name>mapred.map.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.slowstart.completed.maps</name>
-    <value>0.05</value>
-  </property>
-
-  <property>
-    <name>mapred.inmem.merge.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files, for the
-  in-memory merge process. When we accumulate this many files, we initiate
-  the in-memory merge and spill to disk. A value of 0 or less means we do
-  not want any threshold and instead depend only on the ramfs's memory
-  consumption to trigger the merge.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.map.output.compression.codec</name>
-    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to be compressed as SequenceFiles, how
-               should they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
-
-
-  <property>
-    <name>mapred.jobtracker.completeuserjobs.maximum</name>
-    <value>5</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.restart.recover</name>
-    <value>false</value>
-    <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>The percentage of memory, relative to the maximum heap size, to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
-  </property>
-
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 GB.) If the estimated input size of the reduce is greater than
-  this value, the job fails. A value of -1 means that there is no limit
-  set. </description>
-</property>
-
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
-
-  <property>
-    <name>mapred.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
-  </property>
-
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value></value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value></value>
-
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    If a node is blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates if persistency of job status information is
-  active or not.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops out of the
-    memory queue and between jobtracker restarts. With a zero value the job
-    status information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
-  <description>The directory where the job status information is persisted
-   in a file system, to be available after it drops out of the memory queue
-   and between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>21600000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value></value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per JVM. If set to -1, there is no limit.
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
-      JT user name key.
- </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       TaskTracker user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
-
-
-  <property>
-    <name>hadoop.job.history.user.location</name>
-    <value>none</value>
-    <final>true</final>
-  </property>
-
-
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
-
-</property>
-
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
-    <description>The filename of the keytab for the task tracker</description>
- </property>
-
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The path prefix for where the staging directories should be placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
-
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
-
- </property>
-
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialization.
-   </description>
-  </property>
-  <property>
-    <name>mapreduce.history.server.embedded</name>
-    <value>false</value>
-    <description>Whether the job history server should be embedded within the
-JobTracker process</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.history.server.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>HTTP address of the history server</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.kerberos.principal</name>
-    <!-- cluster variant -->
-  <value></value>
-    <description>Job history user name key. (must map to same user as JT
-user)</description>
-  </property>
-
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.</description>
-</property>
-
-</configuration>
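
For reference, the blacklist settings above interact: the 180-minute sliding window is tracked in 15-minute buckets, and 16 faults inside that window graylist a tracker. A minimal sketch of the arithmetic, using only values from this file:

// Sketch: decomposing the blacklist sliding window defined above.
public class BlacklistWindowSketch {
    public static void main(String[] args) {
        int windowMinutes = 180;   // mapred.jobtracker.blacklist.fault-timeout-window
        int bucketMinutes = 15;    // mapred.jobtracker.blacklist.fault-bucket-width
        int faultsToGraylist = 16; // mapred.max.tracker.blacklists

        System.out.println("Window buckets: " + (windowMinutes / bucketMinutes)); // 12
        System.out.println("Faults within the window before graylisting: " + faultsToGraylist);
    }
}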

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 79d219b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
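
The metainfo.xml files in these hunks all share the same shape: a user, a comment, a version, and a list of components with MASTER/SLAVE/CLIENT categories. A minimal sketch of extracting the component list with the JDK's DOM parser (file path illustrative):

// Sketch: listing <component> name/category pairs from a metainfo.xml
// of the shape shown above. The path is illustrative.
import java.io.File;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;

public class MetainfoSketch {
    public static void main(String[] args) throws Exception {
        Document doc = DocumentBuilderFactory.newInstance()
                .newDocumentBuilder().parse(new File("metainfo.xml"));
        NodeList components = doc.getElementsByTagName("component");
        for (int i = 0; i < components.getLength(); i++) {
            Element c = (Element) components.item(i);
            String name = c.getElementsByTagName("name").item(0).getTextContent();
            String category = c.getElementsByTagName("category").item(0).getTextContent();
            System.out.println(name + " -> " + category); // e.g. JOBTRACKER -> MASTER
        }
    }
}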

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/NAGIOS/metainfo.xml
deleted file mode 100644
index bd7de07..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.2.3</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 1665ba8..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,245 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->     
-
-<configuration>
-
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-   </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled any user can manage Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc.).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means an infinite timeout.</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
-      Oozie database name.
-     </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
-      Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
-        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        DEFAULT
-        </value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative, it is looked up
-          within the Oozie configuration directory; the path can also be absolute (e.g.
-          pointing to Hadoop client conf/ directories in the local filesystem).
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist; if the DB schema already exists, this is a NOP.
-            If set to false, it does not create the DB schema. If the DB schema does not exist, startup fails.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>sa</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-</configuration>
\ No newline at end of file
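
The JDBC URL above interpolates ${oozie.data.dir} and ${oozie.db.schema.name} before Oozie opens the embedded Derby database. A minimal sketch of the expansion, with an assumed data directory for illustration:

// Sketch: how the Derby JDBC URL above expands after variable
// substitution. The data dir is an assumption, not from this diff.
public class OozieJdbcUrlSketch {
    public static void main(String[] args) {
        String oozieDataDir = "/var/lib/oozie/data"; // assumed for illustration
        String schemaName = "oozie";                 // oozie.db.schema.name above
        String url = "jdbc:derby:" + oozieDataDir + "/" + schemaName + "-db;create=true";
        System.out.println(url); // jdbc:derby:/var/lib/oozie/data/oozie-db;create=true
    }
}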

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/metainfo.xml
deleted file mode 100644
index 83ccb06..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
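
The two reducer-estimation properties above drive Pig's default parallelism heuristic: roughly min(pig.exec.reducers.max, ceil(inputBytes / pig.exec.reducers.bytes.per.reducer)). A minimal sketch of that arithmetic with an illustrative input size:

// Sketch: the reducer-count estimate implied by the two properties above.
public class PigReducerEstimateSketch {
    public static void main(String[] args) {
        long bytesPerReducer = 1000000000L;  // pig.exec.reducers.bytes.per.reducer
        int maxReducers = 999;               // pig.exec.reducers.max
        long totalInputBytes = 25000000000L; // illustrative 25 GB of input

        long wanted = (totalInputBytes + bytesPerReducer - 1) / bytesPerReducer; // ceil
        long reducers = Math.min(maxReducers, Math.max(1, wanted));
        System.out.println("Estimated reducers: " + reducers); // 25
    }
}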

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/metainfo.xml
deleted file mode 100644
index 4982fd2..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.10.1</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/SQOOP/metainfo.xml
deleted file mode 100644
index ae0e68b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.2</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>


http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/PIG/metainfo.xml
deleted file mode 100644
index 6806c54..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.11.1.1.3.0.0</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/SQOOP/metainfo.xml
deleted file mode 100644
index 1924c54..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.3.1.3.0.0</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 31d0113..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-      <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value></value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API, in milliseconds.</description>
-  </property>
-
-</configuration>
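
templeton.port above (50111) is where the WebHCat server listens; its GET :status endpoint is the usual liveness probe. A minimal sketch using only JDK classes; the host name is illustrative:

// Sketch: probing the WebHCat (Templeton) status endpoint implied by
// templeton.port above. The host name is illustrative.
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class WebHcatStatusSketch {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://webhcat-host.example.com:50111/templeton/v1/status");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream()))) {
            // A healthy server answers {"status":"ok","version":"v1"}
            System.out.println("HTTP " + conn.getResponseCode() + ": " + in.readLine());
        }
    }
}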

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index 15c8daa..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for WEBHCAT service</comment>
-    <version>0.11.0.1.3.0.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/ZOOKEEPER/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/ZOOKEEPER/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/ZOOKEEPER/configuration/global.xml
deleted file mode 100644
index f78df89..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/ZOOKEEPER/configuration/global.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>zk_user</name>
-    <value>zookeeper</value>
-    <description>ZooKeeper User.</description>
-  </property>
-  <property>
-    <name>zookeeperserver_host</name>
-    <value></value>
-    <description>ZooKeeper Server Hosts.</description>
-  </property>
-  <property>
-    <name>zk_data_dir</name>
-    <value>/hadoop/zookeeper</value>
-    <description>Data directory for ZooKeeper.</description>
-  </property>
-  <property>
-    <name>zk_log_dir</name>
-    <value>/var/log/zookeeper</value>
-    <description>ZooKeeper Log Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_dir</name>
-    <value>/var/run/zookeeper</value>
-    <description>ZooKeeper Pid Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_file</name>
-    <value>/var/run/zookeeper/zookeeper_server.pid</value>
-    <description>ZooKeeper Pid File</description>
-  </property>
-  <property>
-    <name>tickTime</name>
-    <value>2000</value>
-    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-  </property>
-  <property>
-    <name>initLimit</name>
-    <value>10</value>
-    <description>Ticks to allow for sync at Init.</description>
-  </property>
-  <property>
-    <name>syncLimit</name>
-    <value>5</value>
-    <description>Ticks to allow for sync at Runtime.</description>
-  </property>
-  <property>
-    <name>clientPort</name>
-    <value>2181</value>
-    <description>Port for running ZK Server.</description>
-  </property>
-
-</configuration>
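
tickTime, initLimit, and syncLimit above combine multiplicatively into ZooKeeper's real timeouts. A minimal sketch of the arithmetic, using only the values from this file:

// Sketch: the timeouts implied by the ZooKeeper settings above.
public class ZkTimeoutSketch {
    public static void main(String[] args) {
        int tickTimeMs = 2000; // tickTime: the basic ZooKeeper time unit
        int initLimit = 10;    // ticks a follower may take to connect and sync
        int syncLimit = 5;     // ticks a follower may lag at runtime

        System.out.println("Init timeout: " + (initLimit * tickTimeMs) + " ms"); // 20000
        System.out.println("Sync timeout: " + (syncLimit * tickTimeMs) + " ms"); // 10000
    }
}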

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 3dc129b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
-    <version>3.4.5.1.3.0.0</version>
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/metainfo.xml
deleted file mode 100644
index 45a63e5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/metainfo.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-	  <active>false</active>
-    </versions>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/repos/repoinfo.xml
deleted file mode 100644
index f55fe25..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/repos/repoinfo.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.2.0</baseurl>
-      <repoid>HDP-1.3.2</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.3.2.0</baseurl>
-      <repoid>HDP-1.3.2</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.2.0</baseurl>
-      <repoid>HDP-1.3.2</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.3.2.0</baseurl>
-      <repoid>HDP-1.3.2</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.2.0</baseurl>
-      <repoid>HDP-1.3.2</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.3.2.0</baseurl>
-      <repoid>HDP-1.3.2</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.3.2.0</baseurl>
-      <repoid>HDP-1.3.2</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-    <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.3.2.0</baseurl>
-      <repoid>HDP-1.3.2</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-</reposinfo>

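A note on the reposinfo layout above: each <os> element binds one OS family to a repo id, repo name, and base URL, which is why the same HDP-1.3.2 block repeats per platform. A site mirroring the repository locally would typically change only the baseurl; a minimal sketch of one entry, where mirror.example.internal is a hypothetical placeholder host, not a real repository:

  <os type="centos6">
    <repo>
      <baseurl>http://mirror.example.internal/HDP/centos6/1.x/updates/1.3.2.0</baseurl>
      <repoid>HDP-1.3.2</repoid>
      <reponame>HDP</reponame>
    </repo>
  </os>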
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/FLUME/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/FLUME/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/FLUME/configuration/global.xml
deleted file mode 100644
index f1fa4de..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/FLUME/configuration/global.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/FLUME/metainfo.xml
deleted file mode 100644
index 185f685..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/FLUME/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Flume is a distributed, reliable, and available system for efficiently collecting, aggregating and moving large amounts of log data from many different sources to a centralized data store.</comment>
-    <version>1.3.1.1.3.2.0</version>
-
-    <components>
-        <component>
-            <name>FLUME_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/GANGLIA/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/GANGLIA/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/GANGLIA/configuration/global.xml
deleted file mode 100644
index 16df0b8..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/GANGLIA/configuration/global.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>ganglia_conf_dir</name>
-    <value>/etc/ganglia/hdp</value>
-    <description>Config directory for Ganglia</description>
-  </property>
-  <property>
-    <name>ganglia_runtime_dir</name>
-    <value>/var/run/ganglia/hdp</value>
-    <description>Run directories for Ganglia</description>
-  </property>
-  <property>
-    <name>gmetad_user</name>
-    <value>nobody</value>
-    <description>User account that runs the gmetad daemon.</description>
-  </property>
-  <property>
-    <name>gmond_user</name>
-    <value>nobody</value>
-    <description>User account that runs the gmond daemon.</description>
-  </property>
-  <property>
-    <name>rrdcached_base_dir</name>
-    <value>/var/lib/ganglia/rrds</value>
-    <description>Default directory for saving the rrd files on ganglia server</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 9fee795..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.5.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/global.xml
deleted file mode 100644
index 453184b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/global.xml
+++ /dev/null
@@ -1,150 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hbasemaster_host</name>
-    <value></value>
-    <description>HBase Master Host.</description>
-  </property>
-  <property>
-    <name>regionserver_hosts</name>
-    <value></value>
-    <description>Region Server Hosts</description>
-  </property>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>PID Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-  <property>
-    <name>hstore_compactionthreshold</name>
-    <value>3</value>
-    <description>HBase HStore compaction threshold.</description>
-  </property>
-  <property>
-    <name>hfile_blockcache_size</name>
-    <value>0.25</value>
-    <description>HFile block cache size.</description>
-  </property>
-  <property>
-    <name>hstorefile_maxsize</name>
-    <value>10737418240</value>
-    <description>Maximum HStoreFile Size</description>
-  </property>
-  <property>
-    <name>regionserver_handlers</name>
-    <value>30</value>
-    <description>HBase RegionServer Handler</description>
-  </property>
-  <property>
-    <name>hregion_majorcompaction</name>
-    <value>86400000</value>
-    <description>HBase Major Compaction.</description>
-  </property>
-  <property>
-    <name>hregion_blockmultiplier</name>
-    <value>2</value>
-    <description>HBase Region Block Multiplier</description>
-  </property>
-  <property>
-    <name>hregion_memstoreflushsize</name>
-    <value></value>
-    <description>HBase Region MemStore Flush Size.</description>
-  </property>
-  <property>
-    <name>client_scannercaching</name>
-    <value>100</value>
-    <description>HBase Client Scanner Caching</description>
-  </property>
-  <property>
-    <name>zookeeper_sessiontimeout</name>
-    <value>60000</value>
-    <description>ZooKeeper Session Timeout</description>
-  </property>
-  <property>
-    <name>hfile_max_keyvalue_size</name>
-    <value>10485760</value>
-    <description>HBase Client Maximum key-value Size</description>
-  </property>
-  <property>
-    <name>hbase_hdfs_root_dir</name>
-    <value>/apps/hbase/data</value>
-    <description>HBase Relative Path to HDFS.</description>
-  </property>
-  <property>
-    <name>hbase_conf_dir</name>
-    <value>/etc/hbase</value>
-    <description>Config Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hdfs_enable_shortcircuit_read</name>
-    <value>true</value>
-    <description>HDFS Short Circuit Read</description>
-  </property>
-  <property>
-    <name>hdfs_support_append</name>
-    <value>true</value>
-    <description>HDFS append support</description>
-  </property>
-  <property>
-    <name>hstore_blockingstorefiles</name>
-    <value>7</value>
-    <description>HStore blocking storefiles.</description>
-  </property>
-  <property>
-    <name>regionserver_memstore_lab</name>
-    <value>true</value>
-    <description>Region Server MemStore-Local Allocation Buffer.</description>
-  </property>
-  <property>
-    <name>regionserver_memstore_lowerlimit</name>
-    <value>0.35</value>
-    <description>Region Server memstore lower limit.</description>
-  </property>
-  <property>
-    <name>regionserver_memstore_upperlimit</name>
-    <value>0.4</value>
-    <description>Region Server memstore upper limit.</description>
-  </property>
-  <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <description>HBase User Name.</description>
-  </property>
-
-</configuration>

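The two memstore fractions in the file above only make sense relative to hbase_regionserver_heapsize, so a rough worked example with the listed values (1024 MB heap, upper limit 0.4, lower limit 0.35):

  blocking threshold: 1024 MB * 0.40 = ~410 MB of combined memstore
  flush-down target:  1024 MB * 0.35 = ~358 MB

Once combined memstore usage hits the upper limit, updates are blocked and flushes are forced until usage drops back below the lower limit.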
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (ie. 
-    clients talking to HRegionServers)
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (ie. 
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster)
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index ead0c52..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,367 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value></value>
-    <description>The directory shared by region servers and into
-      which HBase persists.  The URL should be 'fully-qualified'
-      to include the filesystem scheme.  For example, to specify the
-      HDFS directory '/hbase' where the HDFS instance's namenode is
-      running at namenode.example.org on port 9000, set this value to:
-      hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-      into /tmp.  Change this configuration else all data will be lost
-      on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/hadoop/hbase</value>
-    <description>Temporary directory on the local filesystem.
-      Change this setting to point to a location more permanent
-      than '/tmp' (The '/tmp' directory is often cleared on
-      machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value></value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value></value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>60</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-      Same property is used by the Master for count of master handlers.
-      Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>86400000</value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-      HStoreFiles in a region.  Default: 1 day.
-      Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.38</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
-      times hbase.hregion.flush.size bytes.  Useful for preventing
-      runaway memstore during spikes in update traffic.  Without an
-      upper-bound, memstore fills such that when it flushes the
-      resultant flush files take a long time to compact or split, or
-      worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-      Memstore will be flushed to disk if size of the memstore
-      exceeds this number of bytes.  Value is checked by a thread that runs
-      every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-      Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-      grown to exceed this value, the hosting HRegion is split in two.
-      Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-      on a scanner if it is not served from (local, client) memory. Higher
-      caching values will enable faster scanners but will eat up more memory
-      and some calls of next may take longer and longer times when the cache is empty.
-      Do not set this value such that the time between invocations is greater
-      than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>60000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-      instance. This is to set an upper boundary for a single entry saved in a
-      storage file. Since entries cannot be split, this helps avoid a region
-      becoming unsplittable because its data is too large. It seems wise
-      to set this to a fraction of the maximum region size. Setting it to zero
-      or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-      If more than this number of HStoreFiles in any one HStore
-      (one HStoreFile is written per flush of memstore) then a compaction
-      is run to rewrite all HStoreFiles files as one.  Larger numbers
-      put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>10</value>
-    <description>
-      If more than this number of StoreFiles in any one Store
-      (one StoreFile is written per flush of MemStore) then updates are
-      blocked for this HRegion until a compaction is completed, or
-      until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.40</value>
-    <description>
-      Percentage of maximum heap (-Xmx setting) to allocate to block cache
-      used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-      Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-      the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-      that should be used to run the HMaster process.  The principal name should
-      be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-      portion, it will be replaced with the actual hostname of the running
-      instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-      the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-      that should be used to run the HRegionServer process.  The principal name
-      should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-      hostname portion, it will be replaced with the actual hostname of the
-      running instance.  An entry for this principal must exist in the file
-      specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-      full privileges, regardless of stored ACLs, across the cluster.
-      Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-  </property>
-
-  <property>
-    <name>hbase.rpc.engine</name>
-    <value>org.apache.hadoop.hbase.ipc.WritableRpcEngine</value>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-      default on all tables. For any override coprocessor method, these classes
-      will be called in order. After implementing your own Coprocessor, just put
-      it in HBase's classpath and add the fully qualified class name here.
-      A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-      By default this is set to localhost for local and pseudo-distributed modes
-      of operation. For a fully-distributed setup, this should be set to a full
-      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-      this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>Does HDFS allow appends to files?
-      This is an HDFS config, set here so the HDFS client will do append support.
-      You must ensure that this config is also true server-side when running HBase
-      (You will have to restart your cluster after setting it).
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>Enable/Disable short circuit read for your client.
-      Hadoop servers should be configured to allow short circuit read
-      for the hbase user for this to take effect
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check.</description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-      This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-      with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-      IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-      and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-      not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase-unsecure</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file paths are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-
-</configuration>

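To make the hbase.rootdir and hbase.zookeeper.quorum guidance above concrete, here is a filled-in sketch reusing the illustrative hosts from the descriptions themselves (namenode.example.org and host1-3.mydomain.com are examples, not defaults):

  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://namenode.example.org:9000/hbase</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>host1.mydomain.com,host2.mydomain.com,host3.mydomain.com</value>
  </property>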
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/metainfo.xml
deleted file mode 100644
index ea7ea41..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.6.1.3.2.0</version>
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/configuration/global.xml
deleted file mode 100644
index dd89409..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/configuration/global.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description>WebHCat Log Dir.</description>
-  </property>
-  <property>
-    <name>hcat_pid_dir</name>
-    <value>/etc/run/webhcat</value>
-    <description>WebHCat Pid Dir.</description>
-  </property>
-  <property>
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <description>HCat User.</description>
-  </property>
-  <property>
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <description>WebHCat User.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 818914e..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the HCATALOG service.</comment>
-    <version>0.11.0.1.3.2.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index 731d984..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,253 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-      The size of this buffer should probably be a multiple of hardware
-      page size (4096 on Intel x86), and it determines how much data is
-      buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used
-      for compression/decompression.</description>
-  </property>
-
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
-
-  <!-- file system properties -->
-
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-      literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-      If zero, the trash feature is disabled.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      Default value is same as fs.checkpoint.dir
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>67108864</value>
-    <description>The size of the current edit log (in bytes) that triggers
-      a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-    </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-      connections will be inspected for idleness.
-    </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-      connection to the server.
-    </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-      actions, such as kill job, delete file, etc., that should
-      not be exposed to public. Enable this option if the interfaces
-      are only reachable by those who have the right authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>simple</value>
-    <description>
-      Set the authentication for the cluster. Valid values are: simple or
-      kerberos.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value></value>
-    <description>
-      Enable authorization for different protocols.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-    <description>The mapping from kerberos principal names to local OS user names.
-      The default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
-      both "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" become "omalley", if your default domain is APACHE.ORG.
-      The translations rules have 3 sections:
-      base     filter    substitution
-      The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-      [1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-      [2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-      [2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-      The filter is a regex in parens that must match the generated string for the rule to apply.
-
-      "(.*%admin)" will take any string that ends in "%admin"
-      "(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-      Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-      "s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-      "s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-      "s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-      So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-      RULE:[1:$1@$0](.*@ACME\.COM)s/@.*//
-      DEFAULT
-
-      To also translate the names with a second component, you'd make the rules:
-
-      RULE:[1:$1@$0](.*@ACME\.COM)s/@.*//
-      RULE:[2:$1@$0](.*@ACME\.COM)s/@.*//
-      DEFAULT
-
-      If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-      RULE:[2:$1%$2@$0](.*%admin@APACHE\.ORG)s/.*/admin/
-      DEFAULT
-    </description>
-  </property>
-
-  <!--
-  <property>
-    <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-    <value></value>
-    <description>
-       Proxy group for Hadoop.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-    <value></value>
-    <description>
-       Proxy host for Hadoop.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-    <value></value>
-    <description>
-       Proxy group for Hadoop.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-    <value></value>
-    <description>
-       Proxy host for Hadoop.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-    <value></value>
-    <description>
-      Proxy group for templeton.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-    <value></value>
-    <description>
-      Proxy host for templeton.
-    </description>
-  </property>
-  -->
-</configuration>

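As a worked example of the hadoop.security.auth_to_local rules described above, assembled only from the examples given in that description (a sketch, not a recommended production mapping): the first rule strips the realm from single-component ACME.COM principals, so joe@ACME.COM becomes joe; the second maps any APACHE.ORG principal with an admin instance to the local user admin; DEFAULT then handles principals in the local realm.

  <property>
    <name>hadoop.security.auth_to_local</name>
    <value>
      RULE:[1:$1@$0](.*@ACME\.COM)s/@.*//
      RULE:[2:$1%$2@$0](.*%admin@APACHE\.ORG)s/.*/admin/
      DEFAULT
    </value>
  </property>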
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/global.xml
deleted file mode 100644
index f10b9f9..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/global.xml
+++ /dev/null
@@ -1,187 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>namenode_host</name>
-    <value></value>
-    <description>NameNode Host.</description>
-  </property>
-  <property>
-    <name>dfs_name_dir</name>
-    <value>/hadoop/hdfs/namenode</value>
-    <description>NameNode Directories.</description>
-  </property>
-  <property>
-    <name>snamenode_host</name>
-    <value></value>
-    <description>Secondary NameNode Host.</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Secondary NameNode checkpoint dir.</description>
-  </property>
-  <property>
-    <name>datanode_hosts</name>
-    <value></value>
-    <description>List of Datanode Hosts.</description>
-  </property>
-  <property>
-    <name>dfs_data_dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Data directories for Data Nodes.</description>
-  </property>
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>dfs_webhdfs_enabled</name>
-    <value>true</value>
-    <description>WebHDFS enabled</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>640</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>datanode_du_reserved</name>
-    <value>1</value>
-    <description>Reserved space for HDFS</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>dfs_datanode_failed_volume_tolerated</name>
-    <value>0</value>
-    <description>DataNode volumes failure toleration</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_period</name>
-    <value>21600</value>
-    <description>HDFS Maximum Checkpoint Delay</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_size</name>
-    <value>0.5</value>
-    <description>FS Checkpoint Size.</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-  <property>
-    <name>dfs_exclude</name>
-    <value></value>
-    <description>HDFS Exclude hosts.</description>
-  </property>
-  <property>
-    <name>dfs_include</name>
-    <value></value>
-    <description>HDFS Include hosts.</description>
-  </property>
-  <property>
-    <name>dfs_replication</name>
-    <value>3</value>
-    <description>Default Block Replication.</description>
-  </property>
-  <property>
-    <name>dfs_block_local_path_access_user</name>
-    <value>hbase</value>
-    <description>User allowed to perform short-circuit block access.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_address</name>
-    <value>50010</value>
-    <description>Port for datanode address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_http_address</name>
-    <value>50075</value>
-    <description>Port for datanode HTTP address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir_perm</name>
-    <value>750</value>
-    <description>Datanode dir perms.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-  <property>
-    <name>kadmin_pw</name>
-    <value></value>
-    <description>Kerberos realm admin password</description>
-  </property>
-  <property>
-    <name>keytab_path</name>
-    <value>/etc/security/keytabs</value>
-    <description>Kerberos keytab path.</description>
-  </property>
-
-  <property>
-    <name>namenode_formatted_mark_dir</name>
-    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
-    <description>Formatted Mark Directory.</description>
-  </property>
-  <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>HDFS User Name.</description>
-  </property>
-
-</configuration>
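
For reference, these Ambari "global" keys feed the matching properties in the
rendered hdfs-site.xml; a minimal sketch of the generated output, assuming the
usual one-to-one mapping (dfs_webhdfs_enabled, dfs_replication, and
dfs_datanode_data_dir_perm shown):

  <!-- sketch: generated hdfs-site.xml fragment, assuming the standard mapping -->
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir.perm</name>
    <value>750</value>
  </property>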

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status, etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value></value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank,
-    e.g. "alice,bob users,wheel". A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>
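
Each ACL above takes a comma-separated user list and a comma-separated group
list, separated by a single blank. A sketch of a locked-down client ACL, with
the user and group names purely illustrative:

  <!-- sketch: only alice, bob, and members of hadoop/wheel may use ClientProtocol -->
  <property>
    <name>security.client.protocol.acl</name>
    <value>alice,bob hadoop,wheel</value>
  </property>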


[13/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

Posted by yu...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
deleted file mode 100644
index 8034d19..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,195 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention,such as, -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
-    <value>3000</value>
-    <description>Maximum number of jobs in the system which can be initialized
-     concurrently by the CapacityScheduler.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
-    <value>-1</value>
-    <description>
-        maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
-        This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
-        The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
-        The default value of -1 implies a queue can use the complete capacity of the cluster.
-
-        This property can be used to prevent certain long-running jobs from occupying more than a
-        certain percentage of the cluster, which, in the absence of pre-emption, could affect the
-        capacity guarantees of other queues.
-
-        One important thing to note is that maximum-capacity is a percentage, so the maximum capacity
-        in absolute terms changes with the cluster's capacity: if a large number of nodes or racks are
-        added to the cluster, the maximum capacity in absolute terms increases accordingly.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources 
-    allocated to a user at any given time, if there is competition for them. 
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this 
-    property is 25. If two users have submitted jobs to a queue, no single 
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4 
-    or more users, no user can use more than 25% of the queue's resources. A 
-    value of 100 implies no user limits are imposed. 
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
-    <value>1</value>
-    <description>The multiple of the queue capacity which can be configured to 
-    allow a single user to acquire more slots. 
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
-    <value>200000</value>
-    <description>The maximum number of tasks, across all jobs in the queue, 
-    which can be initialized concurrently. Once the queue's jobs exceed this 
-    limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The maximum number of tasks per-user, across all of the
-    user's jobs in the queue, which can be initialized concurrently. Once the 
-    user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The multiple of (maximum-system-jobs * queue-capacity) used to
-    determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The percentage of the resources limited to a particular user
-      for the job queue at any given point of time by default.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
-    <value>1</value>
-    <description>The default multiple of queue-capacity which is used to 
-    determine the amount of slots a single user can consume concurrently.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
-    <value>200000</value>
-    <description>The default maximum number of tasks, across all jobs in the 
-    queue, which can be initialized concurrently. Once the queue's jobs exceed 
-    this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The default maximum number of tasks per-user, across all of
-    the user's jobs in the queue, which can be initialized concurrently. Once 
-    the user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The default multiple of (maximum-system-jobs * queue-capacity)
-    used to determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in milliseconds which is used to poll
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads used by the initialization
-    poller to initialize jobs in a set of queues. If this number equals
-    the number of job queues, each thread initializes the jobs in a
-    single queue. If it is smaller, each thread is assigned a set of
-    queues. If it is greater, the number of threads is capped at the
-    number of job queues.
-    </description>
-  </property>
-
-</configuration>
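
The queue properties above follow the naming convention
mapred.capacity-scheduler.queue.<queue-name>.property-name, so additional
queues are configured the same way. A sketch of a second queue alongside
default, with the queue name and percentages chosen only for illustration
(capacities across queues should sum to 100, so default's capacity would need
to be lowered to 70):

  <!-- sketch: hypothetical "research" queue at 30% capacity -->
  <property>
    <name>mapred.capacity-scheduler.queue.research.capacity</name>
    <value>30</value>
  </property>
  <property>
    <name>mapred.capacity-scheduler.queue.research.minimum-user-limit-percent</name>
    <value>25</value>
  </property>

Per the minimum-user-limit-percent description above, a value of 25 means two
competing users each get at most 50% of the queue, three get 33%, and four or
more get 25%.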

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
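
The "*" values above open the default queue to all users; the ACL format is
the same blank-separated user-list/group-list used in hadoop-policy.xml. A
sketch that restricts job submission instead, with illustrative names:

  <!-- sketch: only alice, bob, and the etl group may submit to default -->
  <property>
    <name>mapred.queue.default.acl-submit-job</name>
    <value>alice,bob etl</value>
  </property>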

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index 604adb1..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,531 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.sort.mb</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>No description</description>
-  </property>
-
-<!-- map/reduce properties -->
-
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes, and the recommended default is 5 seconds, i.e. a value of
-  5000 here. In this case, we are using it solely to blast tasks before
-  killing them, and killing them very quickly (1/4 second) to guarantee
-  that we do not leave VMs around for later jobs.
-  </description>
-</property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>50</value>
-    <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <!-- cluster specific -->
-    <name>mapred.local.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
-  </property>
-
-  <property>
-    <name>mapred.reduce.parallel.copies</name>
-    <value>30</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-  </property>
-
-  <property>
-    <name>mapred.map.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.slowstart.completed.maps</name>
-    <value>0.05</value>
-  </property>
-
-  <property>
-    <name>mapred.inmem.merge.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files,
-  for the in-memory merge process. When we accumulate the threshold number of
-  files we initiate the in-memory merge and spill to disk. A value of 0 or
-  less indicates that we don't want any threshold and instead depend only on
-  the ramfs's memory consumption to trigger the merge.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.map.output.compression.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to be compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
-
-
-  <property>
-    <name>mapred.jobtracker.completeuserjobs.maximum</name>
-    <value>5</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.restart.recover</name>
-    <value>false</value>
-    <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>The percentage of memory, relative to the maximum heap size, to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
-  </property>
-
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 GB.) If the estimated input size of the reduce is greater than
-  this value, the job is failed. A value of -1 means that there is no limit
-  set. </description>
-</property>
-
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
-
-  <property>
-    <name>mapred.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
-  </property>
-
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value></value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value></value>
-
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    If a node is reported as blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates if persistency of job status information is
-  active or not.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops off the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
-  <description>The directory where the job status information is persisted
-   in a file system to be available after it drops off the memory queue and
-   between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>21600000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value></value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per jvm. If set to -1, there is no limit
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
-      JT user name key.
- </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       tt user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
-
-
-  <property>
-    <name>hadoop.job.history.user.location</name>
-    <value>none</value>
-    <final>true</final>
-  </property>
-
-
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
-
-</property>
-
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
-    <description>The filename of the keytab for the task tracker</description>
- </property>
-
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The path prefix under which the staging directories are placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
-
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group used by the task controller. The mapred user must be a member, and ordinary users should *not* be members.</description>
-
- </property>
-
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialization.
-   </description>
-  </property>
-  <property>
-    <name>mapreduce.history.server.embedded</name>
-    <value>false</value>
-    <description>Whether the job history server should be embedded within the
-    JobTracker process.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.history.server.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>HTTP address of the history server.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.kerberos.principal</name>
-    <!-- cluster variant -->
-  <value></value>
-    <description>Job history user name key. (must map to same user as JT
-user)</description>
-  </property>
-
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.</description>
-</property>
-
-</configuration>
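
Several of the memory properties above ship with empty values and are filled
in per cluster. A sketch of one internally consistent assignment for the map
side, with illustrative numbers, not recommendations: the slot size is 1.5 GB,
jobs default to one slot, and a request above the cluster maximum is rejected
(a request larger than the slot size consumes multiple slots):

  <!-- sketch: 1.5 GB map slots, jobs may request up to 4 slots' worth -->
  <property>
    <name>mapred.cluster.map.memory.mb</name>
    <value>1536</value>
  </property>
  <property>
    <name>mapred.job.map.memory.mb</name>
    <value>1536</value>
  </property>
  <property>
    <name>mapred.cluster.max.map.memory.mb</name>
    <value>6144</value>
  </property>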

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 79d219b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml
deleted file mode 100644
index bd7de07..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.2.3</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 1665ba8..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,245 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->     
-
-<configuration>
-
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-   </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled, any user can manage the Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc.).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means infinite timeout</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
-      Oozie database name.
-     </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
-      Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
-        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        DEFAULT
-        </value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative, it is looked up
-          within the Oozie configuration directory; the path can also be absolute (i.e.
-          pointing to Hadoop client conf/ directories in the local filesystem).
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist; if the DB schema exists, it is a NOP.
-            If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>sa</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-</configuration>
\ No newline at end of file
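
The JPAService block above defaults to embedded Derby; pointing Oozie at an
external database only changes the jdbc.* properties. A sketch for MySQL,
where the host name and credentials are placeholders:

  <!-- sketch: external MySQL instead of embedded Derby; db.example.com is a placeholder -->
  <property>
    <name>oozie.service.JPAService.jdbc.driver</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>oozie.service.JPAService.jdbc.url</name>
    <value>jdbc:mysql://db.example.com:3306/${oozie.db.schema.name}</value>
  </property>
  <property>
    <name>oozie.service.JPAService.jdbc.username</name>
    <value>oozie</value>
  </property>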

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml
deleted file mode 100644
index 83ccb06..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
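
As a worked example of the reducer-estimation settings above: with
pig.exec.reducers.bytes.per.reducer=1000000000, a job reading 50 GB of input
would be assigned ceil(50e9 / 1e9) = 50 reducers, and pig.exec.reducers.max=999
caps the estimate for very large inputs.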

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml
deleted file mode 100644
index 4982fd2..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.10.1</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml
deleted file mode 100644
index ae0e68b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.2</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 31d0113..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value></value>
-    <description>ZooKeeper servers, as comma-separated host:port pairs.</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-    <name>templeton.override.enabled</name>
-    <value>false</value>
-    <description>
-      Enable the override path in templeton.override.jars
-    </description>
-  </property>
-
-  <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API.</description>
-  </property>
-
-</configuration>
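
A quick sketch of how the empty templeton.zookeeper.hosts value above is typically filled in: the description calls for comma-separated host:port pairs. The zk1-zk3.example.com hostnames below are hypothetical, not values taken from this change:

  <property>
    <name>templeton.zookeeper.hosts</name>
    <!-- hypothetical hosts; any comma-separated host:port list works -->
    <value>zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181</value>
  </property>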

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index e65992f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the WEBHCAT service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index fc0c3b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
-    <version>3.4.5</version>
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/repos/repoinfo.xml
deleted file mode 100644
index e8f1855..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/repos/repoinfo.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-</reposinfo>
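
The HDPLocal stack appears to have existed for deployments that point the repo <baseurl> at a locally hosted mirror instead of the public Hortonworks repo. As a sketch, one such <os> block would read as follows; the internal mirror hostname is hypothetical, not part of this change:

  <os type="centos6">
    <repo>
      <!-- hypothetical internal mirror; substitute your own host -->
      <baseurl>http://yum.internal.example.com/HDP/centos6/1.x/updates/1.2.1</baseurl>
      <repoid>HDP-1.2.1</repoid>
      <reponame>HDP</reponame>
    </repo>
  </os>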

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 0b21f0f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (i.e.,
-    clients talking to HRegionServers).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (i.e.,
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
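
The ACL grammar in the descriptions above is easy to misread: users and groups are each a comma-separated list, and the two lists are separated by a single blank. Using the file's own example values, a restricted client ACL would look like:

  <property>
    <name>security.client.protocol.acl</name>
    <!-- users alice and bob, plus groups users and wheel -->
    <value>alice,bob users,wheel</value>
  </property>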

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index 7710cb0..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,345 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value></value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value></value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value></value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value></value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value></value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value></value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value></value>
-    <description>Block updates if the memstore has hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing
-    runaway memstores during spikes in update traffic.  Without an
-    upper bound, the memstore fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or,
-    worse, we hit an OutOfMemoryError.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value></value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value></value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value></value>
-    <description>
-    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value></value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory
-    and some calls of next may take longer and longer times when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value></value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value></value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since a single entry cannot be split, this helps avoid a
-    region becoming unsplittable because its data is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value></value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles files as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value></value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value></value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value></value>
-    <description>Comma-separated list of servers in the ZooKeeper quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>Does HDFS allow appends to files?
-    This is an HDFS setting, placed here so the HDFS client enables append support.
-    You must ensure that this setting is also true server-side when running HBase
-    (you will have to restart your cluster after setting it).
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value></value>
-    <description>Enable/disable short-circuit reads for your client.
-    Hadoop servers should be configured to allow short-circuit reads
-    for the hbase user for this to take effect.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check.</description>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-      Amount of time to wait since the last time a region was flushed before
-      invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare replication failure scenarios (see the release note of HBASE-2611 for an example).
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-</configuration>
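
For reference, a sketch of the two most commonly filled-in blanks above, reusing the example hosts already named in the descriptions (namenode.example.org and host1-3.mydomain.com are illustrative, not values from this change):

  <property>
    <name>hbase.rootdir</name>
    <!-- fully-qualified, including the filesystem scheme -->
    <value>hdfs://namenode.example.org:9000/hbase</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>host1.mydomain.com,host2.mydomain.com,host3.mydomain.com</value>
  </property>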

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/metainfo.xml
deleted file mode 100644
index 553fa2b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.5</version>
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 1951a5d..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the HCATALOG service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>


[03/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/PIG/metainfo.xml
deleted file mode 100644
index 7333eed..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.11.2.2.0.5.0</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/SQOOP/metainfo.xml
deleted file mode 100644
index 566ba36..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.4.2.0.5.0</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 31d0113..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value></value>
-    <description>ZooKeeper servers, as comma-separated host:port pairs.</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-    <name>templeton.override.enabled</name>
-    <value>false</value>
-    <description>
-      Enable the override path in templeton.override.jars
-    </description>
-  </property>
-
-  <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Time out for templeton api</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index 1ee2f7d..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the WEBHCAT service</comment>
-    <version>0.11.0.2.0.5.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/capacity-scheduler.xml
deleted file mode 100644
index ccfb779..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,120 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.2</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run
-      application masters, i.e., it controls the number of concurrently
-      running applications.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at this level (root is the root queue).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.capacity</name>
-    <value>100</value>
-    <description>
-      The total capacity as a percentage out of 100 for this queue.
-      If it has child queues then this includes their capacity as well.
-      The child queues' capacities should add up to their parent queue's
-      capacity or less.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit, a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
-    <value>*</value>
-    <description>
-      The ACL for who can administer this queue, i.e., change sub-queue
-      allocations.
-    </description>
-  </property>
-  
-  <property>
-    <name>yarn.scheduler.capacity.root.unfunded.capacity</name>
-    <value>50</value>
-    <description>
-      No description
-    </description>
-  </property>
-
-</configuration>
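
To make the capacity arithmetic above concrete: sibling queue capacities must sum to at most their parent's capacity. A hypothetical two-queue split under root (the engineering queue name and the 60/40 numbers are illustrative only, not part of this change):

  <property>
    <name>yarn.scheduler.capacity.root.queues</name>
    <value>default,engineering</value>
  </property>
  <property>
    <name>yarn.scheduler.capacity.root.default.capacity</name>
    <value>60</value>
  </property>
  <property>
    <name>yarn.scheduler.capacity.root.engineering.capacity</name>
    <value>40</value>  <!-- 60 + 40 = 100, the root queue's capacity -->
  </property>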

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/container-executor.cfg
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/container-executor.cfg b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/container-executor.cfg
deleted file mode 100644
index 502ddaa..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/container-executor.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-yarn.nodemanager.local-dirs=TODO-YARN-LOCAL-DIR
-yarn.nodemanager.linux-container-executor.group=hadoop
-yarn.nodemanager.log-dirs=TODO-YARN-LOG-DIR
-banned.users=hdfs,bin,0

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/global.xml
deleted file mode 100644
index 7d43a97..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/global.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>rm_host</name>
-    <value></value>
-    <description>ResourceManager host.</description>
-  </property>
-  <property>
-    <name>nm_hosts</name>
-    <value></value>
-    <description>List of NodeManager Hosts.</description>
-  </property>
-  <property>
-    <name>yarn_log_dir_prefix</name>
-    <value>/var/log/hadoop-yarn</value>
-    <description>YARN Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_pid_dir_prefix</name>
-    <value>/var/run/hadoop-yarn</value>
-    <description>YARN PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_user</name>
-    <value>yarn</value>
-    <description>YARN User</description>
-  </property>
-  <property>
-    <name>yarn_heapsize</name>
-    <value>1000</value>
-    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>resourcemanager_heapsize</name>
-    <value>1000</value>
-    <description>Max heap size for the ResourceManager, as a numerical value in MB</description>
-  </property>
-  <property>
-    <name>nodemanager_heapsize</name>
-    <value>1000</value>
-    <description>Max heap size for the NodeManager, as a numerical value in MB</description>
-  </property>
-</configuration>
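
Note on the heapsize globals above: they are plain MB integers that a stack typically renders into environment exports. A minimal Python sketch of one plausible rendering; the YARN_*_HEAPSIZE names follow hadoop-env/yarn-env conventions, but the actual Ambari template is not part of this diff, so treat the mapping as an assumption:

    # Sketch: map the "global" heap sizes (MB) to yarn-env style exports.
    # Variable names follow yarn-env conventions; the real Ambari template
    # is not shown in this diff, so this mapping is an assumption.
    globals_cfg = {
        "yarn_heapsize": 1000,
        "resourcemanager_heapsize": 1000,
        "nodemanager_heapsize": 1000,
    }
    print(f'export YARN_HEAPSIZE={globals_cfg["yarn_heapsize"]}')
    print(f'export YARN_RESOURCEMANAGER_HEAPSIZE={globals_cfg["resourcemanager_heapsize"]}')
    print(f'export YARN_NODEMANAGER_HEAPSIZE={globals_cfg["nodemanager_heapsize"]}')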

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/yarn-site.xml
deleted file mode 100644
index 27d80ab..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/configuration/yarn-site.xml
+++ /dev/null
@@ -1,312 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- ResourceManager -->
-
-  <property>
-    <name>yarn.resourcemanager.resource-tracker.address</name>
-    <value>localhost:8025</value>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.address</name>
-    <value>localhost:8030</value>
-    <description>The address of the scheduler interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.address</name>
-    <value>localhost:8050</value>
-    <description>
-      The address of the applications manager interface in the
-      RM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.admin.address</name>
-    <value>localhost:8141</value>
-    <description>The address of the RM admin interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
-    <description>The class to use as the resource scheduler.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.minimum-allocation-mb</name>
-    <value>512</value>
-    <description>
-      The minimum allocation for every container request at the RM,
-      in MBs. Memory requests lower than this won't take effect,
-      and the specified value will get allocated at minimum.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.maximum-allocation-mb</name>
-    <value>6144</value>
-    <description>
-      The maximum allocation for every container request at the RM,
-      in MBs. Memory requests higher than this won't take effect,
-      and will get capped to this value.
-    </description>
-  </property>
-  
-  <property>
-    <name>yarn.acl.enable</name>
-    <value>true</value>
-  </property>
-  
-  <property>
-    <name>yarn.admin.acl</name>
-    <value>*</value>
-  </property>
-
-  <!-- NodeManager -->
-
-  <property>
-    <name>yarn.nodemanager.address</name>
-    <value>0.0.0.0:45454</value>
-    <description>The address of the container manager in the NM.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>10240</value>
-    <description>Amount of physical memory, in MB, that can be allocated
-      for containers.</description>
-  </property>
-
-  <property>
-    <name>yarn.application.classpath</name>
-    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
-    <description>Classpath for typical applications.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-pmem-ratio</name>
-    <value>2.1</value>
-    <description>Ratio of virtual memory to physical memory when
-      setting memory limits for containers. Container allocations are
-      expressed in terms of physical memory, and virtual memory usage
-      is allowed to exceed this allocation by this ratio.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce.shuffle</value>
-    <description>Auxiliary services of NodeManager</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value>/hadoop/yarn/log</value>
-    <description>
-      Where to store container logs. An application's localized log directory
-      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
-      Individual containers' log directories will be below this, in directories
-      named container_${contid}. Each container directory will contain the files
-      stderr, stdin, and syslog generated by that container.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value>/hadoop/yarn/local</value>
-    <description>
-      List of directories to store localized files in. An
-      application's localized file directory will be found in:
-      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
-      Individual containers' work directories, called container_${contid}, will
-      be subdirectories of this.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-monitor.interval-ms</name>
-    <value>3000</value>
-    <description>
-      The interval, in milliseconds, for which the node manager
-      waits between two cycles of monitoring its containers' memory usage.
-    </description>
-  </property>
-
-  <!-- 
-  <property>
-    <name>yarn.nodemanager.health-checker.script.path</name>
-    <value>/etc/hadoop/conf/health_check_nodemanager</value>
-    <description>The health check script to run.</description>
-  </property>
-   -->
-
-  <property>
-    <name>yarn.nodemanager.health-checker.interval-ms</name>
-    <value>135000</value>
-    <description>Frequency of running node health script.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
-    <value>60000</value>
-    <description>Script time out period.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log.retain-second</name>
-    <value>604800</value>
-    <description>
-      Time in seconds to retain user logs. Only applicable if
-      log aggregation is disabled.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-    <description>Whether to enable log aggregation</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/app-logs</value>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
-    <value>logs</value>
-    <description>
-      The remote log dir will be created at
-      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-aggregation.compression-type</name>
-    <value>gz</value>
-    <description>
-      T-file compression types used to compress aggregated logs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.delete.debug-delay-sec</name>
-    <value>0</value>
-    <description>
-      Number of seconds after an application finishes before the nodemanager's
-      DeletionService will delete the application's localized file directory
-      and log directory.
-
-      To diagnose Yarn application problems, set this property's value large
-      enough (for example, to 600 = 10 minutes) to permit examination of these
-      directories. After changing the property's value, you must restart the
-      nodemanager in order for it to have an effect.
-
-      The roots of Yarn applications' work directories are configurable with
-      the yarn.nodemanager.local-dirs property (see below), and the roots
-      of the Yarn applications' log directories are configurable with the
-      yarn.nodemanager.log-dirs property (see also below).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation.retain-seconds</name>
-    <value>2592000</value>
-    <description>
-      How long to keep aggregation logs before deleting them. -1 disables.
-      Be careful: set this too small and you will spam the name node.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.admin-env</name>
-    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
-    <description>
-      Environment variables that should be forwarded from the NodeManager's
-      environment to the container's.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
-    <value>0.25</value>
-    <description>
-      The minimum fraction of disks that must be healthy for the nodemanager
-      to launch new containers. This corresponds to both
-      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs, i.e.
-      if fewer healthy local-dirs (or log-dirs) are available,
-      then new containers will not be launched on this node.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It's a global
-      setting for all application masters. Each application master can specify
-      its individual maximum number of application attempts via the API, but the
-      individual number cannot be more than the global upper bound. If it is,
-      the resourcemanager will override it. The default number is set to 2, to
-      allow at least one retry for the AM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.webapp.address</name>
-    <value>localhost:8088</value>
-    <description>
-      The address of the RM web application.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-check-enabled</name>
-    <value>false</value>
-    <description>
-      Whether virtual memory limits will be enforced for containers.
-    </description>
-  </property>
-
-
-</configuration>
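
The minimum/maximum allocation and vmem-pmem ratio properties deleted above interact arithmetically at container launch: requests are rounded up to a multiple of the minimum allocation, capped at the maximum, and the virtual memory limit is the granted physical memory times the ratio. A small sketch of that math, assuming the CapacityScheduler's usual round-up-to-increment behavior:

    import math

    # Values from the deleted yarn-site.xml.
    MIN_ALLOC_MB = 512      # yarn.scheduler.minimum-allocation-mb
    MAX_ALLOC_MB = 6144     # yarn.scheduler.maximum-allocation-mb
    VMEM_PMEM_RATIO = 2.1   # yarn.nodemanager.vmem-pmem-ratio

    def granted_container_mb(requested_mb: int) -> int:
        # Assumption: the scheduler rounds requests up to a multiple of the
        # minimum allocation and caps them at the maximum.
        rounded = math.ceil(requested_mb / MIN_ALLOC_MB) * MIN_ALLOC_MB
        return min(max(rounded, MIN_ALLOC_MB), MAX_ALLOC_MB)

    for req in (200, 1500, 9000):
        pmem = granted_container_mb(req)
        print(f"request {req} MB -> grant {pmem} MB physical, "
              f"{pmem * VMEM_PMEM_RATIO:.0f} MB virtual limit")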

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/metainfo.xml
deleted file mode 100644
index 8517e31..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/YARN/metainfo.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-    <version>2.1.0.2.0.5.0</version>
-    <components>
-        <component>
-            <name>RESOURCEMANAGER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>NODEMANAGER</name>
-            <category>SLAVE</category>
-        </component>
-       <component>
-            <name>YARN_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/ZOOKEEPER/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/ZOOKEEPER/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/ZOOKEEPER/configuration/global.xml
deleted file mode 100644
index f78df89..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/ZOOKEEPER/configuration/global.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>zk_user</name>
-    <value>zookeeper</value>
-    <description>ZooKeeper User.</description>
-  </property>
-  <property>
-    <name>zookeeperserver_host</name>
-    <value></value>
-    <description>ZooKeeper Server Hosts.</description>
-  </property>
-  <property>
-    <name>zk_data_dir</name>
-    <value>/hadoop/zookeeper</value>
-    <description>Data directory for ZooKeeper.</description>
-  </property>
-  <property>
-    <name>zk_log_dir</name>
-    <value>/var/log/zookeeper</value>
-    <description>ZooKeeper Log Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_dir</name>
-    <value>/var/run/zookeeper</value>
-    <description>ZooKeeper Pid Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_file</name>
-    <value>/var/run/zookeeper/zookeeper_server.pid</value>
-    <description>ZooKeeper Pid File</description>
-  </property>
-  <property>
-    <name>tickTime</name>
-    <value>2000</value>
-    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-  </property>
-  <property>
-    <name>initLimit</name>
-    <value>10</value>
-    <description>Ticks to allow for sync at Init.</description>
-  </property>
-  <property>
-    <name>syncLimit</name>
-    <value>5</value>
-    <description>Ticks to allow for sync at Runtime.</description>
-  </property>
-  <property>
-    <name>clientPort</name>
-    <value>2181</value>
-    <description>Port for running ZK Server.</description>
-  </property>
-
-</configuration>
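
The tickTime, initLimit, syncLimit, and clientPort globals above mirror the standard zoo.cfg keys of the same names. A minimal sketch of rendering them into a zoo.cfg body; the server hostnames are illustrative assumptions, not values from the stack:

    # Sketch: render the ZooKeeper globals as a zoo.cfg body.
    # The server list is a made-up example; everything else is from the file.
    zk = {
        "tickTime": 2000,
        "initLimit": 10,
        "syncLimit": 5,
        "clientPort": 2181,
        "dataDir": "/hadoop/zookeeper",   # zk_data_dir
    }
    servers = ["zk1.example.com", "zk2.example.com", "zk3.example.com"]  # assumption

    lines = [f"{key}={value}" for key, value in zk.items()]
    lines += [f"server.{i}={host}:2888:3888" for i, host in enumerate(servers, 1)]
    print("\n".join(lines))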

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 41d907e..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.5/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
-    <version>3.4.5.2.0.5.0</version>
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/metainfo.xml
deleted file mode 100644
index 45a63e5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/metainfo.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-	  <active>false</active>
-    </versions>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/repos/repoinfo.xml
deleted file mode 100644
index 6409d73..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/repos/repoinfo.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.6</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.6</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.6</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.6</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.6</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.6</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.6</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.6</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-</reposinfo>
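
Each <os>/<repo> entry above carries the fields a yum repository definition needs (id, name, base URL). A sketch of how one entry might be rendered as a .repo file; the gpgcheck setting and file layout are assumptions for illustration, not taken from Ambari's actual generator:

    # Sketch: one repoinfo.xml <repo> entry rendered as a yum .repo definition.
    # gpgcheck=0 and the section layout are assumptions for illustration.
    repo = {
        "repoid": "HDP-2.0.6",
        "reponame": "HDP",
        "baseurl": "http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0",
    }
    print(f"[{repo['repoid']}]\n"
          f"name={repo['reponame']}\n"
          f"baseurl={repo['baseurl']}\n"
          "enabled=1\n"
          "gpgcheck=0")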

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 9f7444b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.5.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/global.xml
deleted file mode 100644
index b2c57bd..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hbasemaster_host</name>
-    <value></value>
-    <description>HBase Master Host.</description>
-  </property>
-  <property>
-    <name>regionserver_hosts</name>
-    <value></value>
-    <description>Region Server Hosts</description>
-  </property>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>PID Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-  <property>
-    <name>hstore_compactionthreshold</name>
-    <value>3</value>
-    <description>HBase HStore compaction threshold.</description>
-  </property>
-  <property>
-    <name>hfile_blockcache_size</name>
-    <value>0.40</value>
-    <description>HFile block cache size.</description>
-  </property>
-  <property>
-    <name>hstorefile_maxsize</name>
-    <value>10737418240</value>
-    <description>Maximum HStoreFile Size</description>
-  </property>
-    <property>
-    <name>regionserver_handlers</name>
-    <value>60</value>
-    <description>HBase RegionServer Handler</description>
-  </property>
-    <property>
-    <name>hregion_majorcompaction</name>
-    <value>604800000</value>
-    <description>The time between major compactions of all HStoreFiles in a region. Set to 0 to disable automated major compactions.</description>
-  </property>
-    <property>
-    <name>hregion_blockmultiplier</name>
-    <value>2</value>
-    <description>HBase Region Block Multiplier</description>
-  </property>
-    <property>
-    <name>hregion_memstoreflushsize</name>
-    <value></value>
-    <description>HBase Region MemStore Flush Size.</description>
-  </property>
-    <property>
-    <name>client_scannercaching</name>
-    <value>100</value>
-    <description>Base Client Scanner Caching</description>
-  </property>
-    <property>
-    <name>zookeeper_sessiontimeout</name>
-    <value>30000</value>
-    <description>ZooKeeper Session Timeout</description>
-  </property>
-    <property>
-    <name>hfile_max_keyvalue_size</name>
-    <value>10485760</value>
-    <description>HBase Client Maximum key-value Size</description>
-  </property>
-  <property>
-    <name>hbase_hdfs_root_dir</name>
-    <value>/apps/hbase/data</value>
-    <description>HBase Relative Path to HDFS.</description>
-  </property>
-   <property>
-    <name>hbase_conf_dir</name>
-    <value>/etc/hbase</value>
-    <description>Config Directory for HBase.</description>
-  </property>
-   <property>
-    <name>hdfs_enable_shortcircuit_read</name>
-    <value>true</value>
-    <description>HDFS Short Circuit Read</description>
-  </property>
-   <property>
-    <name>hdfs_support_append</name>
-    <value>true</value>
-    <description>HDFS append support</description>
-  </property>
-   <property>
-    <name>hstore_blockingstorefiles</name>
-    <value>10</value>
-    <description>HStore blocking storefiles.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_lab</name>
-    <value>true</value>
-    <description>Region Server memstore.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_lowerlimit</name>
-    <value>0.38</value>
-    <description>Region Server memstore lower limit.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_upperlimit</name>
-    <value>0.4</value>
-    <description>Region Server memstore upper limit.</description>
-  </property>
-   <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <description>HBase User Name.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (i.e.
-    clients talking to HRegionServers).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (i.e.
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
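
The ACL grammar described in the deleted properties (users, then a blank, then groups; "*" means everyone) is easy to misparse, so here is a small sketch of a parser for that format:

    # Sketch: parse a service ACL of the form "user1,user2 group1,group2",
    # where "*" grants access to everyone (format per the descriptions above).
    def parse_acl(acl: str) -> dict:
        acl = acl.strip()
        if acl == "*":
            return {"users": ["*"], "groups": ["*"]}
        users_part, _, groups_part = acl.partition(" ")
        return {
            "users": [u for u in users_part.split(",") if u],
            "groups": [g for g in groups_part.split(",") if g],
        }

    print(parse_acl("*"))                      # everyone
    print(parse_acl("alice,bob users,wheel"))  # the example from the descriptions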

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index 6538941..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,356 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value></value>
-    <description>The directory shared by region servers and into
-      which HBase persists.  The URL should be 'fully-qualified'
-      to include the filesystem scheme.  For example, to specify the
-      HDFS directory '/hbase' where the HDFS instance's namenode is
-      running at namenode.example.org on port 9000, set this value to:
-      hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-      into /tmp.  Change this configuration, or all data will be lost
-      on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/hadoop/hbase</value>
-    <description>Temporary directory on the local filesystem.
-      Change this setting to point to a location more permanent
-      than '/tmp' (The '/tmp' directory is often cleared on
-      machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value></value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value></value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>60</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-      Same property is used by the Master for count of master handlers.
-      Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>86400000</value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-      HStoreFiles in a region.  Default: 1 day.
-      Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.38</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
-      times hbase.hregion.flush.size bytes.  Useful for preventing
-      runaway memstores during spikes in update traffic.  Without an
-      upper bound, the memstore fills such that when it flushes, the
-      resultant flush files take a long time to compact or split, or,
-      worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-      Memstore will be flushed to disk if size of the memstore
-      exceeds this number of bytes.  Value is checked by a thread that runs
-      every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-      Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-      grown to exceed this value, the hosting HRegion is split in two.
-      Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-      on a scanner if it is not served from (local, client) memory. Higher
-      caching values will enable faster scanners but will eat up more memory
-      and some calls of next may take longer and longer times when the cache is empty.
-      Do not set this value such that the time between invocations is greater
-      than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>30000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-      instance. This is to set an upper boundary for a single entry saved in a
-      storage file. Since such entries cannot be split, this helps avoid a region
-      becoming unsplittable because the data is too large. It seems wise
-      to set this to a fraction of the maximum region size. Setting it to zero
-      or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-      If more than this number of HStoreFiles in any one HStore
-      (one HStoreFile is written per flush of memstore) then a compaction
-      is run to rewrite all HStoreFiles files as one.  Larger numbers
-      put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.flush.retries.number</name>
-    <value>120</value>
-    <description>
-      The number of times the region flush operation will be retried.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>10</value>
-    <description>
-      If more than this number of StoreFiles in any one Store
-      (one StoreFile is written per flush of MemStore) then updates are
-      blocked for this HRegion until a compaction is completed, or
-      until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.40</value>
-    <description>
-      Percentage of maximum heap (-Xmx setting) to allocate to block cache
-      used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-      Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-      the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-      that should be used to run the HMaster process.  The principal name should
-      be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-      portion, it will be replaced with the actual hostname of the running
-      instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-      the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-      that should be used to run the HRegionServer process.  The principal name
-      should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-      hostname portion, it will be replaced with the actual hostname of the
-      running instance.  An entry for this principal must exist in the file
-      specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-      full privileges, regardless of stored ACLs, across the cluster.
-      Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-      default on all tables. For any override coprocessor method, these classes
-      will be called in order. After implementing your own Coprocessor, just put
-      it in HBase's classpath and add the fully qualified class name here.
-      A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-      By default this is set to localhost for local and pseudo-distributed modes
-      of operation. For a fully-distributed setup, this should be set to a full
-      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-      this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-      This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-      with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-      IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-      and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-      not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase-unsecure</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file path are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.defaults.for.version.skip</name>
-    <value>true</value>
-    <description>Disables version verification.</description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <description>Path to domain socket.</description>
-  </property>
-
-</configuration>
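
Several of the memstore settings deleted above combine numerically: per region, writes block at block.multiplier times flush.size; per server, forced flushes start at the upper limit fraction of heap and drain down to the lower limit. A worked sketch with this file's values; the 8 GB heap is an assumed example:

    # Sketch: memstore thresholds implied by the deleted hbase-site.xml values.
    HEAP_BYTES = 8 * 1024**3       # assumption: 8 GB RegionServer -Xmx
    UPPER_LIMIT = 0.40             # hbase.regionserver.global.memstore.upperLimit
    LOWER_LIMIT = 0.38             # hbase.regionserver.global.memstore.lowerLimit
    FLUSH_SIZE = 134217728         # hbase.hregion.memstore.flush.size (128 MB)
    BLOCK_MULTIPLIER = 2           # hbase.hregion.memstore.block.multiplier

    per_region_block = BLOCK_MULTIPLIER * FLUSH_SIZE   # writes block per region here
    server_block_at = UPPER_LIMIT * HEAP_BYTES         # forced flushes start here
    server_flush_to = LOWER_LIMIT * HEAP_BYTES         # flushing drains down to here

    print(f"per-region blocking threshold: {per_region_block / 2**20:.0f} MB")
    print(f"server-wide upper limit:       {server_block_at / 2**30:.2f} GB")
    print(f"server-wide lower limit:       {server_flush_to / 2**30:.2f} GB")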

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/metainfo.xml
deleted file mode 100644
index c17a87e..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.96.0.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>hbase-site</config-type>
-      <config-type>hbase-policy</config-type>
-    </configuration-dependencies>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 3b165d8..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the HCATALOG service</comment>
-    <version>0.12.0.2.0.6.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index 22626ce..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,167 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-      The size of this buffer should probably be a multiple of hardware
-      page size (4096 on Intel x86), and it determines how much data is
-      buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec</value>
-    <description>A list of the compression codec classes that can be used
-      for compression/decompression.</description>
-  </property>
-
-  <!-- file system properties -->
-
-  <property>
-    <name>fs.defaultFS</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-      literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-      If zero, the trash feature is disabled.
-    </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-      connections will be inspected for idleness.
-    </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-      connection to the server.
-    </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>mapreduce.jobtracker.webinterface.trusted</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-      actions, such as kill job, delete file, etc., that should
-      not be exposed to the public. Enable this option if the interfaces
-      are only reachable by those who have the right authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>simple</value>
-    <description>
-      Set the authentication for the cluster. Valid values are: simple or
-      kerberos.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>false</value>
-    <description>
-      Enable authorization for different protocols.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>
-      RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
-      RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
-      RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
-      RULE:[2:$1@$0](hm@.*)s/.*/hbase/
-      RULE:[2:$1@$0](rs@.*)s/.*/hbase/
-      DEFAULT
-    </value>
-    <description>The mapping from kerberos principal names to local OS user names.
-      The default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
-      both "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" become "omalley" if your default domain is APACHE.ORG.
-      The translation rules have 3 sections:
-      base     filter    substitution
-      The base consists of a number that represents the number of components in the principal name excluding the realm, and the pattern for building the name from the sections of the principal name. The pattern uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-      [1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-      [2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-      [2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-      The filter is a regex in parens that must match the generated string for the rule to apply.
-
-      "(.*%admin)" will take any string that ends in "%admin"
-      "(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-      Finally, the substitution is a sed rule that translates a regex into a fixed string.
-
-      "s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-      "s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-      "s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-      So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component, such as "joe@ACME.COM", you'd do:
-
-      RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-      DEFAULT
-
-      To also translate the names with a second component, you'd make the rules:
-
-      RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-      RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-      DEFAULT
-
-      If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-      RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-      DEFAULT
-    </description>
-  </property>
-
-</configuration>
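
A note on the hadoop.security.auth_to_local rules deleted above: each RULE:[n:pattern](filter)s/find/replace/ line can be modeled in a few lines of Python. The sketch below is a simplified illustration of the semantics the description spells out; the helper name apply_rule is hypothetical, and this is not Hadoop's actual KerberosName implementation.

    import re

    def apply_rule(principal, n, pattern, filter_re, sed_find, sed_replace):
        # Split user/component@REALM into its components and the realm.
        name, realm = principal.split("@", 1)
        components = name.split("/")
        if len(components) != n:                   # base: component count must match
            return None
        fields = {"$0": realm}
        for i, comp in enumerate(components, start=1):
            fields["$%d" % i] = comp
        built = pattern
        for key, val in fields.items():
            built = built.replace(key, val)        # build the candidate string
        if not re.match(filter_re + r"$", built):  # filter: regex must match it
            return None
        # sed-style substitution; like sed, replace only the first occurrence
        return re.sub(sed_find, sed_replace, built, count=1)

    # RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/ maps rm/host1@EXAMPLE.COM to "yarn"
    print(apply_rule("rm/host1@EXAMPLE.COM", 2, "$1@$0", r"[rn]m@.*", r".*", "yarn"))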

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/global.xml
deleted file mode 100644
index 59b68ac..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/global.xml
+++ /dev/null
@@ -1,192 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>namenode_host</name>
-    <value></value>
-    <description>NameNode Host.</description>
-  </property>
-  <property>
-    <name>dfs_namenode_name_dir</name>
-    <value>/hadoop/hdfs/namenode</value>
-    <description>NameNode Directories.</description>
-  </property>
-  <property>
-    <name>snamenode_host</name>
-    <value></value>
-    <description>Secondary NameNode host.</description>
-  </property>
-  <property>
-    <name>dfs_namenode_checkpoint_dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Secondary NameNode checkpoint dir.</description>
-  </property>
-  <property>
-    <name>datanode_hosts</name>
-    <value></value>
-    <description>List of Datanode Hosts.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Data directories for Data Nodes.</description>
-  </property>
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>dfs_webhdfs_enabled</name>
-    <value>true</value>
-    <description>WebHDFS enabled</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>640</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>datanode_du_reserved</name>
-    <value>1073741824</value>
-    <description>Reserved space for HDFS</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>dfs_datanode_failed_volume_tolerated</name>
-    <value>0</value>
-    <description>DataNode volumes failure toleration</description>
-  </property>
-  <property>
-    <name>dfs_namenode_checkpoint_period</name>
-    <value>21600</value>
-    <description>HDFS Maximum Checkpoint Delay</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_size</name>
-    <value>0.5</value>
-    <description>FS Checkpoint Size.</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-  <property>
-    <name>dfs_exclude</name>
-    <value></value>
-    <description>HDFS Exclude hosts.</description>
-  </property>
-  <property>
-    <name>dfs_replication</name>
-    <value>3</value>
-    <description>Default Block Replication.</description>
-  </property>
-  <property>
-    <name>dfs_block_local_path_access_user</name>
-    <value>hbase</value>
-    <description>Block local path access user.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_address</name>
-    <value>50010</value>
-    <description>Port for datanode address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_http_address</name>
-    <value>50075</value>
-    <description>Port for datanode HTTP address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir_perm</name>
-    <value>750</value>
-    <description>Datanode dir perms.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-  <property>
-    <name>kadmin_pw</name>
-    <value></value>
-    <description>Kerberos realm admin password</description>
-  </property>
-  <property>
-    <name>keytab_path</name>
-    <value>/etc/security/keytabs</value>
-    <description>Kerberos keytab path.</description>
-  </property>
-  
-  <property>
-    <name>keytab_path</name>
-    <value>/etc/security/keytabs</value>
-    <description>KeyTab Directory.</description>
-  </property>
-    <property>
-    <name>namenode_formatted_mark_dir</name>
-    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
-    <description>Formatted Mark Directory.</description>
-  </property>
-    <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>User and Groups.</description>
-  </property>
-  <property>
-    <name>lzo_enabled</name>
-    <value>true</value>
-    <description>LZO compression enabled</description>
-  </property>
-  
-</configuration>
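
For readers tracing where the heap values in the deleted global.xml end up: settings such as namenode_heapsize, namenode_opt_newsize, and namenode_opt_maxnewsize are typically rendered into JVM flags by a hadoop-env template. A rough Python sketch of that mapping follows; the exact flag spelling is an assumption, not taken from this patch.

    # Illustrative only: flag names are an assumption about the hadoop-env template.
    cfg = {
        "namenode_heapsize": 1024,       # MB, from global.xml above
        "namenode_opt_newsize": 200,     # MB
        "namenode_opt_maxnewsize": 640,  # MB
    }

    namenode_opts = (
        f"-Xmx{cfg['namenode_heapsize']}m "
        f"-XX:NewSize={cfg['namenode_opt_newsize']}m "
        f"-XX:MaxNewSize={cfg['namenode_opt_maxnewsize']}m"
    )
    print(namenode_opts)  # -Xmx1024m -XX:NewSize=200m -XX:MaxNewSize=640m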


[10/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

Posted by yu...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HIVE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HIVE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HIVE/configuration/global.xml
deleted file mode 100644
index d9adc80..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HIVE/configuration/global.xml
+++ /dev/null
@@ -1,125 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hivemetastore_host</name>
-    <value></value>
-    <description>Hive Metastore host.</description>
-  </property>
-  <property>
-    <name>hive_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_host</name>
-    <value></value>
-    <description></description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_host</name>
-    <value></value>
-    <description></description>
-  </property>
-  <property>
-    <name>hive_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database type.</description>
-  </property>  
-  <property>
-    <name>hive_ambari_host</name>
-    <value></value>
-    <description>Database hostname.</description>
-  </property>
-  <property>
-    <name>hive_database_name</name>
-    <value></value>
-    <description>Database name.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_user_name</name>
-    <value>hive</value>
-    <description>Database username to use to connect to the database.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_user_passwd</name>
-    <value></value>
-    <description>Database password to use to connect to the database.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_port</name>
-    <value>9083</value>
-    <description>Hive Metastore port.</description>
-  </property>    
-  <property>
-    <name>hive_lib</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive Library.</description>
-  </property>    
-  <property>
-    <name>hive_dbroot</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive DB Directory.</description>
-  </property>      
-  <property>
-    <name>hive_conf_dir</name>
-    <value>/etc/hive/conf</value>
-    <description>Hive Conf Dir.</description>
-  </property>
-  <property>
-    <name>hive_log_dir</name>
-    <value>/var/log/hive</value>
-    <description>Directory for Hive Log files.</description>
-  </property>
-  <property>
-    <name>hive_pid_dir</name>
-    <value>/var/run/hive</value>
-    <description>Hive PID Dir.</description>
-  </property>
-  <property>
-    <name>mysql_connector_url</name>
-    <value>${download_url}/mysql-connector-java-5.1.18.zip</value>
-    <description>MySQL connector download URL.</description>
-  </property>
-  <property>
-    <name>hive_aux_jars_path</name>
-    <value>/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar</value>
-    <description>Hive auxiliary jar path.</description>
-  </property>
-  <property>
-    <name>hive_user</name>
-    <value>hive</value>
-    <description>Hive User.</description>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 40fa0a7..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,243 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>hive.metastore.local</name>
-    <value>false</value>
-    <description>Controls whether to connect to a remote metastore server or
-    open a new metastore server in the Hive Client JVM.</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value></value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value></value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value></value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hadoop.clientside.fs.operations</name>
-    <value>true</value>
-    <description>FS operations are owned by client</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In insecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>true</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization of converting a common
-      join into a mapjoin based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Whether the join will be automatically converted to a sort-merge join if the joined
-      tables pass the criteria for a sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file
-      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than the
-      specified size, the join is directly converted to a mapjoin (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
-      converted to a mapjoin (there is no conditional task). The default is 10MB.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
-    <description>Reduce deduplication merges two RSs (ReduceSink operators) by moving the key/parts/reducer-num of the child RS to the parent RS.
-      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can produce a very slow, single-reducer MR job.
-      The optimization will be disabled if the number of reducers is less than the specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and if there are map-join jobs followed by a map-reduce
-      job (e.g., a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      How many values for each key in the map-joined table should be cached
-      in memory.
-    </description>
-  </property>
-
-</configuration>
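
The hive.auto.convert.join.noconditionaltask.size rule described in the deleted hive-site.xml reduces to a single comparison. Here is a hedged Python sketch, reading "the sum of sizes for n-1 of the tables" as the n-1 smallest tables; the table sizes below are made up.

    # Illustrative only: the documented mapjoin-conversion rule, sizes in bytes.
    NOCONDITIONALTASK_SIZE = 1_000_000_000  # hive.auto.convert.join.noconditionaltask.size

    def converts_to_mapjoin(table_sizes):
        """True if the n-1 smallest tables together fit under the threshold."""
        small_tables = sorted(table_sizes)[:-1]  # all but the largest (streamed) table
        return sum(small_tables) < NOCONDITIONALTASK_SIZE

    # Two small dimension tables plus one large fact table: converted to a mapjoin.
    print(converts_to_mapjoin([50_000_000, 200_000_000, 8_000_000_000]))  # True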

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HIVE/metainfo.xml
deleted file mode 100644
index 520ccec..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.11.0.1.3.0.0</version>
-
-    <components>        
-        <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
-        </component>
-        <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>
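
Every metainfo.xml removed by this patch shares the same simple shape, so the component layout is easy to inspect with the Python standard library; the local file path below is hypothetical.

    import xml.etree.ElementTree as ET

    # Hypothetical path; any metainfo.xml in this patch has the same structure.
    root = ET.parse("metainfo.xml").getroot()

    print("version:", root.findtext("version"))
    for component in root.iter("component"):
        # MASTER components run on designated hosts; CLIENT components are local libraries.
        print(component.findtext("name"), "->", component.findtext("category"))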

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HUE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HUE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HUE/configuration/global.xml
deleted file mode 100644
index c49480f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HUE/configuration/global.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hue_pid_dir</name>
-    <value>/var/run/hue</value>
-    <description>Hue Pid Dir.</description>
-  </property>
-  <property>
-    <name>hue_log_dir</name>
-    <value>/var/log/hue</value>
-    <description>Hue Log Dir.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HUE/configuration/hue-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HUE/configuration/hue-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HUE/configuration/hue-site.xml
deleted file mode 100644
index 6eb52a2..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HUE/configuration/hue-site.xml
+++ /dev/null
@@ -1,290 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-  <!-- General Hue server configuration properties -->
-  <property>
-      <name>send_debug_messages</name>
-      <value>1</value>
-      <description></description>
-  </property>
-
-  <property>
-    <name>database_logging</name>
-    <value>0</value>
-    <description>To show database transactions, set database_logging to 1.
-      By default, database_logging=0.</description>
-  </property>
-
-  <property>
-    <name>secret_key</name>
-    <value></value>
-    <description>This is used for secure hashing in the session store.</description>
-  </property>
-
-  <property>
-    <name>http_host</name>
-    <value>0.0.0.0</value>
-    <description>Webserver listens on this address and port</description>
-  </property>
-
-  <property>
-    <name>http_port</name>
-    <value>8000</value>
-    <description>Webserver listens on this address and port</description>
-  </property>
-
-  <property>
-    <name>time_zone</name>
-    <value>America/Los_Angeles</value>
-    <description>Time zone name</description>
-  </property>
-
-  <property>
-    <name>django_debug_mode</name>
-    <value>1</value>
-    <description>Turn off debug</description>
-  </property>
-
-  <property>
-    <name>use_cherrypy_server</name>
-    <value>false</value>
-    <description>Set to true to use CherryPy as the webserver, set to false
-      to use Spawning as the webserver. Defaults to Spawning if
-      key is not specified.</description>
-  </property>
-
-  <property>
-    <name>http_500_debug_mode</name>
-    <value>1</value>
-    <description>Turn off backtrace for server error</description>
-  </property>
-
-  <property>
-    <name>server_user</name>
-    <value></value>
-    <description>Webserver runs as this user</description>
-  </property>
-
-  <property>
-    <name>server_group</name>
-    <value></value>
-    <description>Webserver runs as this group</description>
-  </property>
-
-  <property>
-    <name>backend_auth_policy</name>
-    <value>desktop.auth.backend.AllowAllBackend</value>
-    <description>Authentication backend.</description>
-  </property>
-
-  <!-- Hue Database configuration properties -->
-  <property>
-    <name>db_engine</name>
-    <value>mysql</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_host</name>
-    <value>localhost</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_port</name>
-    <value>3306</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_user</name>
-    <value>sandbox</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_password</name>
-    <value>1111</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_name</name>
-    <value>sandbox</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <!-- Hue Email configuration properties -->
-  <property>
-    <name>smtp_host</name>
-    <value>localhost</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_port</name>
-    <value>25</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_user</name>
-    <value></value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_password</name>
-    <value>25</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>tls</name>
-    <value>no</value>
-    <description>Whether to use a TLS (secure) connection when talking to the SMTP server.</description>
-  </property>
-
-  <property>
-    <name>default_from_email</name>
-    <value>sandbox@hortonworks.com</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <!-- Hue Hadoop configuration properties -->
-  <property>
-    <name>fs_defaultfs</name>
-    <value></value>
-    <description>Enter the filesystem URI,
-      e.g. hdfs://sandbox:8020</description>
-  </property>
-
-  <property>
-    <name>webhdfs_url</name>
-    <value></value>
-    <description>Use WebHdfs/HttpFs as the communication mechanism. To fall back to
-      using the Thrift plugin (used in Hue 1.x), this must be uncommented
-      and explicitly set to the empty value.
-      Value e.g.: http://localhost:50070/webhdfs/v1/</description>
-  </property>
-
-  <property>
-    <name>jobtracker_host</name>
-    <value></value>
-    <description>Enter the host on which you are running the Hadoop JobTracker.</description>
-  </property>
-
-  <property>
-    <name>jobtracker_port</name>
-    <value>50030</value>
-    <description>The port where the JobTracker IPC listens on.</description>
-  </property>
-
-  <property>
-    <name>hadoop_mapred_home</name>
-    <value>/usr/lib/hadoop/lib</value>
-    <description>The MapReduce home directory.</description>
-  </property>
-
-  <property>
-    <name>resourcemanager_host</name>
-    <value></value>
-    <description>Enter the host on which you are running the ResourceManager.</description>
-  </property>
-
-  <property>
-    <name>resourcemanager_port</name>
-    <value></value>
-    <description>The port where the ResourceManager IPC listens on.</description>
-  </property>
-
-  <!-- Hue Beeswax configuration properties -->
-  <property>
-    <name>hive_home_dir</name>
-    <value></value>
-    <description>Hive home directory.</description>
-  </property>
-
-  <property>
-    <name>hive_conf_dir</name>
-    <value></value>
-    <description>Hive configuration directory, where hive-site.xml is
-      located.</description>
-  </property>
-
-  <property>
-    <name>templeton_url</name>
-    <value></value>
-    <description>WebHcat http URL</description>
-  </property>
-
-  <!-- Hue shell types configuration -->
-  <property>
-    <name>pig_nice_name</name>
-    <value></value>
-    <description>Define and configure a new shell type pig</description>
-  </property>
-
-  <property>
-    <name>pig_shell_command</name>
-    <value>/usr/bin/pig -l /dev/null</value>
-    <description>Define and configure a new shell type pig.</description>
-  </property>
-
-  <property>
-    <name>pig_java_home</name>
-    <value></value>
-    <description>Define and configure a new shell type pig.</description>
-  </property>
-
-  <property>
-    <name>hbase_nice_name</name>
-    <value>HBase Shell</value>
-    <description>Define and configure a new shell type hbase</description>
-  </property>
-
-  <property>
-    <name>hbase_shell_command</name>
-    <value>/usr/bin/hbase shell</value>
-    <description>Define and configure a new shell type hbase.</description>
-  </property>
-
-  <property>
-    <name>bash_nice_name</name>
-    <value></value>
-    <description>Define and configure a new shell type bash for testing
-      only</description>
-  </property>
-
-  <property>
-    <name>bash_shell_command</name>
-    <value>/bin/bash</value>
-    <description>Define and configure a new shell type bash for testing
-      only.</description>
-  </property>
-
-  <!-- Hue Settings for the User Admin application -->
-  <property>
-    <name>whitelist</name>
-    <value>(localhost|127\.0\.0\.1):(50030|50070|50060|50075|50111)</value>
-    <description>proxy settings</description>
-  </property>
-
-</configuration>
\ No newline at end of file
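
The whitelist value in the deleted hue-site.xml is an ordinary regular expression over host:port pairs; a quick, illustrative Python check of what it admits:

    import re

    # The proxy whitelist from hue-site.xml above: local hosts on known Hadoop UI ports.
    WHITELIST = re.compile(r"(localhost|127\.0\.0\.1):(50030|50070|50060|50075|50111)")

    print(bool(WHITELIST.fullmatch("localhost:50070")))    # True: NameNode web UI port
    print(bool(WHITELIST.fullmatch("example.com:50070")))  # False: host not whitelisted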

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HUE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HUE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HUE/metainfo.xml
deleted file mode 100644
index c6e384f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/HUE/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Hue is a graphical user interface to operate and develop
-      applications for Apache Hadoop.</comment>
-    <version>2.2.0</version>
-
-    <components>
-        <component>
-            <name>HUE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
deleted file mode 100644
index 8034d19..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,195 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention,such as, -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
-    <value>3000</value>
-    <description>Maximum number of jobs in the system which can be initialized,
-     concurrently, by the CapacityScheduler.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
-    <value>-1</value>
-    <description>
-	maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
-	This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
-	The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
-        The default value of -1 implies a queue can use the complete capacity of the cluster.
-
-        This property can be used to curtail certain long-running jobs from occupying more than a
-        certain percentage of the cluster, which, in the absence of pre-emption, could lead to the
-        capacity guarantees of other queues being affected.
-
-        One important thing to note is that maximum-capacity is a percentage, so based on the cluster's
-        capacity the max capacity would change. So if a large number of nodes or racks is added to the
-        cluster, the max capacity in absolute terms would increase accordingly.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources 
-    allocated to a user at any given time, if there is competition for them. 
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this 
-    property is 25. If two users have submitted jobs to a queue, no single 
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4 
-    or more users, no user can use more than 25% of the queue's resources. A 
-    value of 100 implies no user limits are imposed. 
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
-    <value>1</value>
-    <description>The multiple of the queue capacity which can be configured to 
-    allow a single user to acquire more slots. 
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
-    <value>200000</value>
-    <description>The maximum number of tasks, across all jobs in the queue, 
-    which can be initialized concurrently. Once the queue's jobs exceed this 
-    limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The maximum number of tasks per-user, across all of the
-    user's jobs in the queue, which can be initialized concurrently. Once the 
-    user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The multiple of (maximum-system-jobs * queue-capacity) used to
-    determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The percentage of the resources limited to a particular user
-      for the job queue at any given point of time by default.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
-    <value>1</value>
-    <description>The default multiple of queue-capacity which is used to 
-    determine the amount of slots a single user can consume concurrently.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
-    <value>200000</value>
-    <description>The default maximum number of tasks, across all jobs in the 
-    queue, which can be initialized concurrently. Once the queue's jobs exceed 
-    this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The default maximum number of tasks per-user, across all of
-    the user's jobs in the queue, which can be initialized concurrently. Once 
-    the user's jobs exceed this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The default multiple of (maximum-system-jobs * queue-capacity)
-    used to determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in milliseconds which is used to poll
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads used by the initialization
-    poller to initialize jobs in a set of queues. If the number specified
-    is equal to the number of job queues, each thread initializes jobs in
-    one queue. If it is smaller, each thread is assigned a set of queues.
-    If it is greater, the number of threads is capped at the number of
-    job queues.
-    </description>
-  </property>
-
-</configuration>
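
The minimum-user-limit-percent arithmetic spelled out in the deleted capacity-scheduler.xml (with a value of 25, two users get 50% each, three get 33%, four or more get 25%) reduces to a one-line formula. A small Python sketch, using the example value of 25 from the description rather than the configured value of 100:

    def user_limit_percent(active_users, minimum_user_limit_percent=25):
        # Per-user share of queue resources, per the description above (illustrative).
        return max(minimum_user_limit_percent, 100 / active_users)

    for users in (1, 2, 3, 4, 5):
        print(users, "user(s) ->", round(user_limit_percent(users)), "% each")
    # 1 -> 100, 2 -> 50, 3 -> 33, 4 -> 25, 5 -> 25 (floor at the configured minimum)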

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/global.xml
deleted file mode 100644
index 2fc33c9..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>jobtracker_host</name>
-    <value></value>
-    <description>JobTracker Host.</description>
-  </property>
-  <property>
-    <name>tasktracker_hosts</name>
-    <value></value>
-    <description>TaskTracker hosts.</description>
-  </property>
-  <property>
-    <name>mapred_local_dir</name>
-    <value>/hadoop/mapred</value>
-    <description>MapRed Local Directories.</description>
-  </property>
-  <property>
-    <name>mapred_system_dir</name>
-    <value>/mapred/system</value>
-    <description>MapRed System Directories.</description>
-  </property>
-  <property>
-    <name>scheduler_name</name>
-    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
-    <description>MapRed Capacity Scheduler.</description>
-  </property>
-  <property>
-    <name>jtnode_opt_newsize</name>
-    <value>200</value>
-    <description>Mem New Size.</description>
-  </property>
-  <property>
-    <name>jtnode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>Max New size.</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>jtnode_heapsize</name>
-    <value>1024</value>
-    <description>Maximum Java heap size for JobTracker in MB (Java option -Xmx)</description>
-  </property>
-  <property>
-    <name>mapred_map_tasks_max</name>
-    <value>4</value>
-    <description>Number of Map-task slots that can run simultaneously on a TaskTracker</description>
-  </property>
-  <property>
-    <name>mapred_red_tasks_max</name>
-    <value>2</value>
-    <description>Number of Reduce-task slots that can run simultaneously on a TaskTracker</description>
-  </property>
-  <property>
-    <name>mapred_cluster_map_mem_mb</name>
-    <value>-1</value>
-    <description>The virtual memory size of a single Map slot in the MapReduce framework</description>
-  </property>
-  <property>
-    <name>mapred_cluster_red_mem_mb</name>
-    <value>-1</value>
-    <description>The virtual memory size of a single Reduce slot in the MapReduce framework</description>
-  </property>
-  <property>
-    <name>mapred_job_map_mem_mb</name>
-    <value>-1</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-  <property>
-    <name>mapred_child_java_opts_sz</name>
-    <value>768</value>
-    <description>Java options for the TaskTracker child processes.</description>
-  </property>
-  <property>
-    <name>io_sort_mb</name>
-    <value>200</value>
-    <description>The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration).</description>
-  </property>
-  <property>
-    <name>io_sort_spill_percent</name>
-    <value>0.9</value>
-    <description>Percentage of sort buffer used for record collection (Expert-only configuration).</description>
-  </property>
-  <property>
-    <name>mapreduce_userlog_retainhours</name>
-    <value>24</value>
-    <description>The maximum time, in hours, for which the user-logs are to be retained after the job completion.</description>
-  </property>
-  <property>
-    <name>maxtasks_per_job</name>
-    <value>-1</value>
-    <description>Maximum number of tasks for a single Job</description>
-  </property>
-  <property>
-    <name>lzo_enabled</name>
-    <value>true</value>
-    <description>LZO compression enabled</description>
-  </property>
-  <property>
-    <name>snappy_enabled</name>
-    <value>true</value>
-    <description>Snappy compression enabled</description>
-  </property>
-  <property>
-    <name>rca_enabled</name>
-    <value>true</value>
-    <description>Enable Job Diagnostics.</description>
-  </property>
-  <property>
-    <name>mapred_hosts_exclude</name>
-    <value></value>
-    <description>Exclude entered hosts</description>
-  </property>
-  <property>
-    <name>mapred_hosts_include</name>
-    <value></value>
-    <description>Include entered hosts</description>
-  </property>
-  <property>
-    <name>mapred_jobstatus_dir</name>
-    <value>file:////mapred/jobstatus</value>
-    <description>Job Status directory</description>
-  </property>
-  <property>
-    <name>task_controller</name>
-    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-    <description>Task Controller.</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>MapReduce User.</description>
-  </property>
-
-</configuration>

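Note: as a rough worked example of how the slot and heap defaults above compose (assuming each child JVM is actually sized by mapred_child_java_opts_sz), 4 map slots plus 2 reduce slots at 768 MB of heap each allow up to 6 x 768 MB = 4608 MB of task heap on a single TaskTracker, in addition to the 1024 MB daemon heaps set by hadoop_heapsize and jtnode_heapsize.
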
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>

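Note: the two '*' values above leave the default queue open to all users. A minimal sketch of a restricted variant (the user and group names below are hypothetical) follows the usual mapred ACL format of comma-separated users, a space, then comma-separated groups:

  <property>
    <name>mapred.queue.default.acl-submit-job</name>
    <!-- users "alice" and "bob" plus anyone in group "analysts" may submit -->
    <value>alice,bob analysts</value>
  </property>

  <property>
    <name>mapred.queue.default.acl-administer-jobs</name>
    <!-- a leading space means no users, groups only -->
    <value> hadoop</value>
  </property>
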
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index df6ca71..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,537 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.sort.mb</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>No description</description>
-  </property>
-
-<!-- map/reduce properties -->
-
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes; the recommended default is 5 seconds (a value of 5000 ms).
-  In this case, it is used solely to blast tasks before killing them,
-  and to kill them very quickly (1/4 second) to guarantee that no JVMs
-  are left around for later jobs.
-  </description>
-</property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>50</value>
-    <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value>/mapred/system</value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <!-- cluster specific -->
-    <name>mapred.local.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
-  </property>
-
-  <property>
-    <name>mapred.reduce.parallel.copies</name>
-    <value>30</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-  </property>
-
-  <property>
-    <name>mapred.map.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.slowstart.completed.maps</name>
-    <value>0.05</value>
-  </property>
-
-  <property>
-    <name>mapred.inmem.merge.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files,
-  for the in-memory merge process. When we accumulate the threshold number of
-  files, we initiate the in-memory merge and spill to disk. A value of 0 or
-  less indicates no threshold; instead, only the ramfs's memory consumption
-  triggers the merge.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.map.output.compression.codec</name>
-    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to be compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
-
-
-  <property>
-    <name>mapred.jobtracker.completeuserjobs.maximum</name>
-    <value>5</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.restart.recover</name>
-    <value>false</value>
-    <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-  <description>The percentage of memory, relative to the maximum heap size, to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
-  </property>
-
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 GB.)  If the estimated input size of the reduce is greater than
-  this value, the job is failed. A value of -1 means that there is no
-  limit set. </description>
-</property>
-
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
-
-  <property>
-    <name>mapred.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
-  </property>
-
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value></value>
-
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.map.memory.mb</name>
-    <value>1536</value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.reduce.memory.mb</name>
-    <value>2048</value>
-  </property>
-
-  <property>
-    <name>mapred.job.map.memory.mb</name>
-    <value>1536</value>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.memory.mb</name>
-    <value>2048</value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.map.memory.mb</name>
-    <value>6144</value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.reduce.memory.mb</name>
-    <value>4096</value>
-  </property>
-
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates if persistence of job status information is
-  active or not.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops off the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
-  <description>The directory where the job status information is persisted
-   in a file system to be available after it drops off the memory queue and
-   between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>21600000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value></value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per JVM. If set to -1, there is no limit.
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
-      JobTracker user name key.
- </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       TaskTracker user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
-
-
-  <property>
-    <name>hadoop.job.history.user.location</name>
-    <value>none</value>
-    <final>true</final>
-  </property>
-
-
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
-
-</property>
-
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
-    <description>The filename of the keytab for the task tracker</description>
- </property>
-
- <property>
-   <name>mapred.task.tracker.http.address</name>
-   <value></value>
-   <description>Http address for task tracker.</description>
- </property>
-
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The path prefix for where the staging directories should be placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
-
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
-
- </property>
-
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialization.
-   </description>
-  </property>
-  <property>
-    <name>mapreduce.history.server.embedded</name>
-    <value>false</value>
-    <description>Should the job history server be embedded within the
-JobTracker process?</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.history.server.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Http address of the history server</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.kerberos.principal</name>
-    <!-- cluster variant -->
-  <value></value>
-    <description>Job history user name key. (must map to same user as JT
-user)</description>
-  </property>
-
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.</description>
-</property>
-
-</configuration>

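Note: to make the shuffle settings above concrete (assuming a hypothetical 1024 MB reduce JVM heap, since mapred.child.java.opts is left blank in this file): mapred.job.shuffle.input.buffer.percent=0.7 reserves about 717 MB of that heap for in-memory map outputs, mapred.job.shuffle.merge.percent=0.66 starts an in-memory merge once roughly 473 MB of it fills, and mapred.job.reduce.input.buffer.percent=0.0 forces all remaining map outputs to disk before the reduce begins. The blacklist settings compose the same way: a 180-minute fault window with 15-minute buckets tracks faults across 12 buckets per TaskTracker.
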
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index b4a95a0..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.2.0.1.3.0.0</version>
-
-    <components>
-        <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/NAGIOS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/NAGIOS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/NAGIOS/configuration/global.xml
deleted file mode 100644
index 61a2b90..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/NAGIOS/configuration/global.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <description>Nagios Username.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <description>Nagios Group.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Nagios web user.</description>
-  </property>
-  <property>
-    <name>nagios_web_password</name>
-    <value></value>
-    <description>Nagios Admin Password.</description>
-  </property>
-  <property>
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Hadoop Admin Email.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/NAGIOS/metainfo.xml
deleted file mode 100644
index bd7de07..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.2.3</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/OOZIE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/OOZIE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/OOZIE/configuration/global.xml
deleted file mode 100644
index ddbf780..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/OOZIE/configuration/global.xml
+++ /dev/null
@@ -1,105 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>oozie_user</name>
-    <value>oozie</value>
-    <description>Oozie User.</description>
-  </property>
-  <property>
-    <name>oozieserver_host</name>
-    <value></value>
-    <description>Oozie Server Host.</description>
-  </property>
-  <property>
-    <name>oozie_database</name>
-    <value></value>
-    <description>Oozie Server Database.</description>
-  </property>
-  <property>
-    <name>oozie_derby_database</name>
-    <value>Derby</value>
-    <description>Oozie Derby Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_database</name>
-    <value>MySQL</value>
-    <description>Oozie MySQL Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_host</name>
-    <value></value>
-    <description>Existing MySQL Host.</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_database</name>
-    <value>Oracle</value>
-    <description>Oracle Database</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_host</name>
-    <value></value>
-    <description>Database Host.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database default.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_host</name>
-    <value></value>
-    <description>Host on which the database will be created.</description>
-  </property>
-  <property>
-    <name>oozie_database_name</name>
-    <value>oozie</value>
-    <description>Database name used for Oozie.</description>
-  </property>
-  <property>
-    <name>oozie_metastore_user_name</name>
-    <value>oozie</value>
-    <description>Database user name to use to connect to the database</description>
-  </property>
-  <property>
-    <name>oozie_metastore_user_passwd</name>
-    <value></value>
-    <description>Database password to use to connect to the database</description>
-  </property>
-  <property>
-    <name>oozie_data_dir</name>
-    <value>/hadoop/oozie/data</value>
-    <description>Data directory in which the Oozie DB exists</description>
-  </property>
-  <property>
-    <name>oozie_log_dir</name>
-    <value>/var/log/oozie</value>
-    <description>Directory for oozie logs</description>
-  </property>
-  <property>
-    <name>oozie_pid_dir</name>
-    <value>/var/run/oozie</value>
-    <description>Directory in which the pid files for oozie reside.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index eeed0d8..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,237 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->     
-
-<configuration>
-
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-   </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.authorization.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled any user can manage Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means infinite timeout</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
-      Oozie database name
-     </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
-      Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
-      System library path to use for workflow applications.
-      This path is added to workflow applications if their job properties set
-      the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>DEFAULT</value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
-          the Oozie configuration directory; the path can also be absolute (i.e. pointing
-          to Hadoop client conf/ directories in the local filesystem).
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>org.apache.oozie.action.email.EmailActionExecutor,
-org.apache.oozie.action.hadoop.HiveActionExecutor,
-org.apache.oozie.action.hadoop.ShellActionExecutor,
-org.apache.oozie.action.hadoop.SqoopActionExecutor,
-org.apache.oozie.action.hadoop.DistcpActionExecutor</value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist; if the DB schema already exists, this is a NOP.
-            If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>sa</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if the password is empty, leave a 1-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-</configuration>
\ No newline at end of file

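Note: the hadoop.configurations whitelist above accepts more than the single wildcard entry; a minimal sketch with one hypothetical JobTracker authority mapped to its own conf directory (the host, port, and second path are illustrative only):

    <property>
      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
      <!-- comma-separated AUTHORITY=HADOOP_CONF_DIR pairs; '*' is the fallback -->
      <value>*=/etc/hadoop/conf,jt.example.com:50300=/etc/hadoop/conf-jt</value>
    </property>
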
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/OOZIE/metainfo.xml
deleted file mode 100644
index 46460b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
-    <version>3.3.2.1.3.0.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false

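Note: the two reducer-estimation settings above combine roughly as min(pig.exec.reducers.max, ceil(input_bytes / pig.exec.reducers.bytes.per.reducer)); for example, a hypothetical 50 GB input would be planned with min(999, ceil(50e9 / 1e9)) = 50 reducers unless the script sets PARALLEL explicitly.
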

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/OOZIE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/OOZIE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/OOZIE/configuration/global.xml
deleted file mode 100644
index ddbf780..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/OOZIE/configuration/global.xml
+++ /dev/null
@@ -1,105 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>oozie_user</name>
-    <value>oozie</value>
-    <description>Oozie User.</description>
-  </property>
-  <property>
-    <name>oozieserver_host</name>
-    <value></value>
-    <description>Oozie Server Host.</description>
-  </property>
-  <property>
-    <name>oozie_database</name>
-    <value></value>
-    <description>Oozie Server Database.</description>
-  </property>
-  <property>
-    <name>oozie_derby_database</name>
-    <value>Derby</value>
-    <description>Oozie Derby Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_database</name>
-    <value>MySQL</value>
-    <description>Oozie MySQL Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_host</name>
-    <value></value>
-    <description>Existing MySQL Host.</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_database</name>
-    <value>Oracle</value>
-    <description>Oracle Database</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_host</name>
-    <value></value>
-    <description>Database Host.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database default.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_host</name>
-    <value></value>
-    <description>Host on which the database will be created.</description>
-  </property>
-  <property>
-    <name>oozie_database_name</name>
-    <value>oozie</value>
-    <description>Database name used for Oozie.</description>
-  </property>
-  <property>
-    <name>oozie_metastore_user_name</name>
-    <value>oozie</value>
-    <description>Database user name to use to connect to the database</description>
-  </property>
-  <property>
-    <name>oozie_metastore_user_passwd</name>
-    <value></value>
-    <description>Database password to use to connect to the database</description>
-  </property>
-  <property>
-    <name>oozie_data_dir</name>
-    <value>/hadoop/oozie/data</value>
-    <description>Data directory in which the Oozie DB exists</description>
-  </property>
-  <property>
-    <name>oozie_log_dir</name>
-    <value>/var/log/oozie</value>
-    <description>Directory for oozie logs</description>
-  </property>
-  <property>
-    <name>oozie_pid_dir</name>
-    <value>/var/run/oozie</value>
-    <description>Directory in which the pid files for oozie reside.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 1f83735..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,237 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration>
-
-  <!--
-      Refer to the oozie-default.xml file for the complete list of
-      Oozie configuration properties and their default values.
-  -->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-  </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-      The Oozie system ID.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.systemmode</name>
-    <value>NORMAL</value>
-    <description>
-      System mode for Oozie at startup.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.AuthorizationService.authorization.enabled</name>
-    <value>true</value>
-    <description>
-      Specifies whether security (user name/admin role) is enabled or not.
-      If disabled any user can manage Oozie system and manage any job.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.older.than</name>
-    <value>30</value>
-    <description>
-      Jobs older than this value, in days, will be purged by the PurgeService.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.purge.interval</name>
-    <value>3600</value>
-    <description>
-      Interval at which the purge service will run, in seconds.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.queue.size</name>
-    <value>1000</value>
-    <description>Max callable queue size</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.threads</name>
-    <value>10</value>
-    <description>Number of threads used for executing callables</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.callable.concurrency</name>
-    <value>3</value>
-    <description>
-      Maximum concurrency for a given callable type.
-      Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
-      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-      All commands that use action executors (action-start, action-end, action-kill and action-check) use
-      the action type as the callable type.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.coord.normal.default.timeout</name>
-    <value>120</value>
-    <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means infinite timeout</description>
-  </property>
-
-  <property>
-    <name>oozie.db.schema.name</name>
-    <value>oozie</value>
-    <description>
-      Oozie database name
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-    <value> </value>
-    <description>
-      Whitelisted job tracker for Oozie service.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.authentication.type</name>
-    <value>simple</value>
-    <description>
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-    <value> </value>
-    <description>
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.WorkflowAppService.system.libpath</name>
-    <value>/user/${user.name}/share/lib</value>
-    <description>
-      System library path to use for workflow applications.
-      This path is added to workflow applications if their job properties set
-      the property 'oozie.use.system.libpath' to true.
-    </description>
-  </property>
-
-  <property>
-    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-    <value>false</value>
-    <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-    </description>
-  </property>
-  <property>
-    <name>oozie.authentication.kerberos.name.rules</name>
-    <value>DEFAULT</value>
-    <description>The mapping from kerberos principal names to local OS user names.</description>
-  </property>
-  <property>
-    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-    <value>*=/etc/hadoop/conf</value>
-    <description>
-      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
-      the Oozie configuration directory; the path can also be absolute (i.e. pointing
-      to Hadoop client conf/ directories in the local filesystem).
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.ActionService.executor.ext.classes</name>
-    <value>org.apache.oozie.action.email.EmailActionExecutor,
-      org.apache.oozie.action.hadoop.HiveActionExecutor,
-      org.apache.oozie.action.hadoop.ShellActionExecutor,
-      org.apache.oozie.action.hadoop.SqoopActionExecutor,
-      org.apache.oozie.action.hadoop.DistcpActionExecutor</value>
-  </property>
-
-  <property>
-    <name>oozie.service.SchemaService.wf.ext.schemas</name>
-    <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-  </property>
-  <property>
-    <name>oozie.service.JPAService.create.db.schema</name>
-    <value>false</value>
-    <description>
-      Creates Oozie DB.
-
-      If set to true, it creates the DB schema if it does not exist; if the DB schema already exists, this is a NOP.
-      If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.driver</name>
-    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-    <description>
-      JDBC driver class.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.url</name>
-    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-    <description>
-      JDBC URL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.username</name>
-    <value>oozie</value>
-    <description>
-      DB user name.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.password</name>
-    <value> </value>
-    <description>
-      DB user password.
-
-      IMPORTANT: if the password is empty, leave a 1-space string; the service trims the value,
-      and if it is empty, Configuration assumes it is NULL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.pool.max.active.conn</name>
-    <value>10</value>
-    <description>
-      Max number of connections.
-    </description>
-  </property>
-</configuration>
\ No newline at end of file

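Note: for comparison with the embedded-Derby defaults above, a minimal sketch of the same JPAService properties pointed at an external MySQL instance (the host and database names are hypothetical, and the MySQL connector JAR must be on Oozie's classpath):

    <property>
        <name>oozie.service.JPAService.jdbc.driver</name>
        <value>com.mysql.jdbc.Driver</value>
    </property>

    <property>
        <name>oozie.service.JPAService.jdbc.url</name>
        <value>jdbc:mysql://db.example.com:3306/oozie</value>
    </property>
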
http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/OOZIE/metainfo.xml
deleted file mode 100644
index 3f9fff5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.</comment>
-    <version>3.3.2.1.3.2.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false
-
-pig.disable.counter=false
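
The two reducer-estimation settings above combine as ceil(total input bytes / bytes per reducer), capped at the maximum. A small sketch of that arithmetic (illustrative only, not Pig's internal code):

    public class ReducerEstimate {
        // Mirrors pig.exec.reducers.bytes.per.reducer and pig.exec.reducers.max above.
        static final long BYTES_PER_REDUCER = 1_000_000_000L;
        static final int MAX_REDUCERS = 999;

        /** Estimated reducer count for a job reading totalInputBytes. */
        static int estimateReducers(long totalInputBytes) {
            long byBytes = (totalInputBytes + BYTES_PER_REDUCER - 1) / BYTES_PER_REDUCER; // ceiling division
            return (int) Math.max(1, Math.min(byBytes, MAX_REDUCERS));
        }

        public static void main(String[] args) {
            System.out.println(estimateReducers(2_500_000_000L)); // 3 reducers for 2.5 GB of input
        }
    }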

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/PIG/metainfo.xml
deleted file mode 100644
index 8efacb4..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.11.1.1.3.2.0</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/SQOOP/metainfo.xml
deleted file mode 100644
index 1f03a35..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.3.1.3.2.0</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index cc30c7a..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-    <name>templeton.override.enabled</name>
-    <value>false</value>
-    <description>
-      Enable the override path in templeton.override.jars
-    </description>
-  </property>
-
-  <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API, in milliseconds.</description>
-  </property>
-
-</configuration>
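
templeton.port above is the HTTP port the WebHCat (Templeton) server listens on, and /templeton/v1/status is its standard health-check resource. A minimal client-side sketch; the hostname is a placeholder:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WebHcatPing {
        public static void main(String[] args) throws Exception {
            // Port matches templeton.port above; the host is a placeholder.
            URL url = new URL("http://webhcat.example.com:50111/templeton/v1/status");
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setConnectTimeout(5000);
            conn.setReadTimeout(5000);
            System.out.println("HTTP " + conn.getResponseCode());
            try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
                System.out.println(in.readLine()); // a healthy server answers {"status":"ok","version":"v1"}
            }
        }
    }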

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index c47b34e..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Provides a REST-like web API for HCatalog and related Hadoop components</comment>
-    <version>0.11.0.1.3.2.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/ZOOKEEPER/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/ZOOKEEPER/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/ZOOKEEPER/configuration/global.xml
deleted file mode 100644
index f78df89..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/ZOOKEEPER/configuration/global.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>zk_user</name>
-    <value>zookeeper</value>
-    <description>ZooKeeper User.</description>
-  </property>
-  <property>
-    <name>zookeeperserver_host</name>
-    <value></value>
-    <description>ZooKeeper Server Hosts.</description>
-  </property>
-  <property>
-    <name>zk_data_dir</name>
-    <value>/hadoop/zookeeper</value>
-    <description>Data directory for ZooKeeper.</description>
-  </property>
-  <property>
-    <name>zk_log_dir</name>
-    <value>/var/log/zookeeper</value>
-    <description>ZooKeeper Log Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_dir</name>
-    <value>/var/run/zookeeper</value>
-    <description>ZooKeeper Pid Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_file</name>
-    <value>/var/run/zookeeper/zookeeper_server.pid</value>
-    <description>ZooKeeper Pid File</description>
-  </property>
-  <property>
-    <name>tickTime</name>
-    <value>2000</value>
-    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-  </property>
-  <property>
-    <name>initLimit</name>
-    <value>10</value>
-    <description>Ticks to allow for followers to connect and sync at startup.</description>
-  </property>
-  <property>
-    <name>syncLimit</name>
-    <value>5</value>
-    <description>Ticks a follower may lag behind the leader at runtime.</description>
-  </property>
-  <property>
-    <name>clientPort</name>
-    <value>2181</value>
-    <description>Port for running ZK Server.</description>
-  </property>
-
-</configuration>
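
tickTime is the base unit: initLimit and syncLimit are expressed in ticks, so the effective timeouts are products of the two. A sketch of the values implied by the settings above:

    public class ZkTimeouts {
        public static void main(String[] args) {
            int tickTimeMs = 2000; // tickTime above
            int initLimit = 10;    // ticks allowed for followers to connect and sync at startup
            int syncLimit = 5;     // ticks a follower may lag behind the leader

            System.out.println("init timeout: " + tickTimeMs * initLimit + " ms"); // 20000 ms
            System.out.println("sync timeout: " + tickTimeMs * syncLimit + " ms"); // 10000 ms
        }
    }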

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index b18ae7e..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
-    <version>3.4.5.1.3.2.0</version>
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/metainfo.xml
deleted file mode 100644
index 45a63e5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/metainfo.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-      <active>false</active>
-    </versions>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/repos/repoinfo.xml
deleted file mode 100644
index 87c68bf..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/repos/repoinfo.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>REPLACE_WITH_CENTOS6_URL</baseurl>
-      <repoid>HDP-1.3.3</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>REPLACE_WITH_CENTOS5_URL</baseurl>
-      <repoid>HDP-1.3.3</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>REPLACE_WITH_CENTOS6_URL</baseurl>
-      <repoid>HDP-1.3.3</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>REPLACE_WITH_CENTOS5_URL</baseurl>
-      <repoid>HDP-1.3.3</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux6">
-    <repo>
-      <baseurl>REPLACE_WITH_CENTOS6_URL</baseurl>
-      <repoid>HDP-1.3.3</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux5">
-    <repo>
-      <baseurl>REPLACE_WITH_CENTOS5_URL</baseurl>
-      <repoid>HDP-1.3.3</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>REPLACE_WITH_SUSE11_URL</baseurl>
-      <repoid>HDP-1.3.3</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="sles11">
-    <repo>
-      <baseurl>REPLACE_WITH_SUSE11_URL</baseurl>
-      <repoid>HDP-1.3.3</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-</reposinfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/FLUME/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/FLUME/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/FLUME/configuration/global.xml
deleted file mode 100644
index f1fa4de..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/FLUME/configuration/global.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/FLUME/metainfo.xml
deleted file mode 100644
index 13eba83..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/FLUME/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Flume is a distributed, reliable, and available system for efficiently collecting, aggregating and moving large amounts of log data from many different sources to a centralized data store.</comment>
-    <version>1.3.1.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>FLUME_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/GANGLIA/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/GANGLIA/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/GANGLIA/configuration/global.xml
deleted file mode 100644
index 16df0b8..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/GANGLIA/configuration/global.xml
+++ /dev/null
@@ -1,55 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>ganglia_conf_dir</name>
-    <value>/etc/ganglia/hdp</value>
-    <description>Config directory for Ganglia</description>
-  </property>
-  <property>
-    <name>ganglia_runtime_dir</name>
-    <value>/var/run/ganglia/hdp</value>
-    <description>Run directories for Ganglia</description>
-  </property>
-  <property>
-    <name>gmetad_user</name>
-    <value>nobody</value>
-    <description>User running gmetad.</description>
-  </property>
-  <property>
-    <name>gmond_user</name>
-    <value>nobody</value>
-    <description>User running gmond.</description>
-  </property>
-  <property>
-    <name>rrdcached_base_dir</name>
-    <value>/var/lib/ganglia/rrds</value>
-    <description>Default directory for saving the rrd files on ganglia server</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 9fee795..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.5.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/global.xml
deleted file mode 100644
index 453184b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hbasemaster_host</name>
-    <value></value>
-    <description>HBase Master Host.</description>
-  </property>
-  <property>
-    <name>regionserver_hosts</name>
-    <value></value>
-    <description>Region Server Hosts</description>
-  </property>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>Pid Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-  <property>
-    <name>hstore_compactionthreshold</name>
-    <value>3</value>
-    <description>HBase HStore compaction threshold.</description>
-  </property>
-  <property>
-    <name>hfile_blockcache_size</name>
-    <value>0.25</value>
-    <description>HFile block cache size.</description>
-  </property>
-  <property>
-    <name>hstorefile_maxsize</name>
-    <value>10737418240</value>
-    <description>Maximum HStoreFile Size</description>
-  </property>
-  <property>
-    <name>regionserver_handlers</name>
-    <value>30</value>
-    <description>HBase RegionServer Handler Count.</description>
-  </property>
-  <property>
-    <name>hregion_majorcompaction</name>
-    <value>86400000</value>
-    <description>HBase Major Compaction.</description>
-  </property>
-  <property>
-    <name>hregion_blockmultiplier</name>
-    <value>2</value>
-    <description>HBase Region Block Multiplier.</description>
-  </property>
-  <property>
-    <name>hregion_memstoreflushsize</name>
-    <value></value>
-    <description>HBase Region MemStore Flush Size.</description>
-  </property>
-  <property>
-    <name>client_scannercaching</name>
-    <value>100</value>
-    <description>HBase Client Scanner Caching.</description>
-  </property>
-  <property>
-    <name>zookeeper_sessiontimeout</name>
-    <value>60000</value>
-    <description>ZooKeeper Session Timeout.</description>
-  </property>
-  <property>
-    <name>hfile_max_keyvalue_size</name>
-    <value>10485760</value>
-    <description>HBase Client Maximum Key-Value Size.</description>
-  </property>
-  <property>
-    <name>hbase_hdfs_root_dir</name>
-    <value>/apps/hbase/data</value>
-    <description>HBase Relative Path to HDFS.</description>
-  </property>
-  <property>
-    <name>hbase_conf_dir</name>
-    <value>/etc/hbase</value>
-    <description>Config Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hdfs_enable_shortcircuit_read</name>
-    <value>true</value>
-    <description>HDFS Short Circuit Read.</description>
-  </property>
-  <property>
-    <name>hdfs_support_append</name>
-    <value>true</value>
-    <description>HDFS append support.</description>
-  </property>
-  <property>
-    <name>hstore_blockingstorefiles</name>
-    <value>7</value>
-    <description>HStore blocking storefiles.</description>
-  </property>
-  <property>
-    <name>regionserver_memstore_lab</name>
-    <value>true</value>
-    <description>Enable the RegionServer MemStore-Local Allocation Buffer.</description>
-  </property>
-  <property>
-    <name>regionserver_memstore_lowerlimit</name>
-    <value>0.35</value>
-    <description>Region Server memstore lower limit.</description>
-  </property>
-  <property>
-    <name>regionserver_memstore_upperlimit</name>
-    <value>0.4</value>
-    <description>Region Server memstore upper limit.</description>
-  </property>
-  <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <description>HBase User Name.</description>
-  </property>
-
-</configuration>
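
The memstore limits above are fractions of the RegionServer heap, so with the 1024 MB heap configured here the upper and lower marks work out as below (a sketch of the arithmetic, not HBase code):

    public class MemstoreBudget {
        public static void main(String[] args) {
            long heapBytes = 1024L * 1024 * 1024; // hbase_regionserver_heapsize above: 1024 MB
            double upper = 0.4;                   // regionserver_memstore_upperlimit
            double lower = 0.35;                  // regionserver_memstore_lowerlimit

            // Above the upper mark updates are blocked; flushing continues down to the lower mark.
            System.out.printf("block updates above %d bytes%n", (long) (heapBytes * upper));
            System.out.printf("flush down to       %d bytes%n", (long) (heapBytes * lower));
        }
    }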

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (i.e.
-    clients talking to HRegionServers).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (i.e.
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
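
All three ACLs share one format: comma-separated users, a single blank, then comma-separated groups, with "*" meaning everyone. A sketch of a parser for that format (illustrative only, not HBase's actual implementation):

    import java.util.Arrays;

    public class HbaseAclFormat {
        /** Describes an ACL such as "alice,bob users,wheel"; "*" allows all users. */
        static void describe(String acl) {
            acl = acl.trim();
            if ("*".equals(acl)) {
                System.out.println("all users allowed");
                return;
            }
            String[] parts = acl.split(" ", 2); // users and groups are separated by a blank
            System.out.println("users=" + Arrays.asList(parts[0].split(",")));
            if (parts.length > 1) {
                System.out.println("groups=" + Arrays.asList(parts[1].split(",")));
            }
        }

        public static void main(String[] args) {
            describe("*");                     // all users allowed
            describe("alice,bob users,wheel"); // users=[alice, bob], groups=[users, wheel]
        }
    }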

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index ead0c52..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,367 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value></value>
-    <description>The directory shared by region servers and into
-      which HBase persists.  The URL should be 'fully-qualified'
-      to include the filesystem scheme.  For example, to specify the
-      HDFS directory '/hbase' where the HDFS instance's namenode is
-      running at namenode.example.org on port 9000, set this value to:
-      hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-      into /tmp.  Change this configuration, or else all data will be lost
-      on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/hadoop/hbase</value>
-    <description>Temporary directory on the local filesystem.
-      Change this setting to point to a location more permanent
-      than '/tmp' (The '/tmp' directory is often cleared on
-      machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value></value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value></value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>60</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-      Same property is used by the Master for count of master handlers.
-      Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>86400000</value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-      HStoreFiles in a region.  Default: 1 day.
-      Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.38</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>Block updates if the memstore has hbase.hregion.memstore.block.multiplier
-      times hbase.hregion.flush.size bytes.  Useful for preventing
-      runaway memstore growth during spikes in update traffic.  Without an
-      upper bound, the memstore fills such that when it flushes, the
-      resultant flush files take a long time to compact or split, or,
-      worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-      Memstore will be flushed to disk if size of the memstore
-      exceeds this number of bytes.  Value is checked by a thread that runs
-      every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-      Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-      grown to exceed this value, the hosting HRegion is split in two.
-      Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-      on a scanner if it is not served from (local, client) memory. Higher
-      caching values will enable faster scanners but will eat up more memory,
-      and some calls of next may take longer when the cache is empty.
-      Do not set this value such that the time between invocations is greater
-      than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>60000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-      instance. This is to set an upper boundary for a single entry saved in a
-      storage file. Since entries cannot be split, this helps avoid a region
-      becoming unsplittable because its data is too large. It seems wise
-      to set this to a fraction of the maximum region size. Setting it to zero
-      or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-      If more than this number of HStoreFiles in any one HStore
-      (one HStoreFile is written per flush of memstore) then a compaction
-      is run to rewrite all HStoreFiles files as one.  Larger numbers
-      put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>10</value>
-    <description>
-      If more than this number of StoreFiles in any one Store
-      (one StoreFile is written per flush of MemStore) then updates are
-      blocked for this HRegion until a compaction is completed, or
-      until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.40</value>
-    <description>
-      Percentage of maximum heap (-Xmx setting) to allocate to block cache
-      used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-      Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-      the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-      that should be used to run the HMaster process.  The principal name should
-      be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-      portion, it will be replaced with the actual hostname of the running
-      instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-      the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-      that should be used to run the HRegionServer process.  The principal name
-      should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-      hostname portion, it will be replaced with the actual hostname of the
-      running instance.  An entry for this principal must exist in the file
-      specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-      full privileges, regardless of stored ACLs, across the cluster.
-      Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-  </property>
-
-  <property>
-    <name>hbase.rpc.engine</name>
-    <value>org.apache.hadoop.hbase.ipc.WritableRpcEngine</value>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-      default on all tables. For any override coprocessor method, these classes
-      will be called in order. After implementing your own Coprocessor, just put
-      it in HBase's classpath and add the fully qualified class name here.
-      A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-      By default this is set to localhost for local and pseudo-distributed modes
-      of operation. For a fully-distributed setup, this should be set to a full
-      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-      this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>Does HDFS allow appends to files?
-      This is an HDFS configuration, set here so the HDFS client enables append support.
-      You must ensure that this configuration is also true server-side when running HBase
-      (you will have to restart your cluster after setting it).
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>Enable/Disable short circuit read for your client.
-      Hadoop servers should be configured to allow short circuit read
-      for the hbase user for this to take effect
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check</description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-      This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-      with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-      IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-      and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-      not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase-unsecure</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file path are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-
-</configuration>
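
On the client side, the same ZooKeeper coordinates (quorum, client port, znode parent) are what an application needs to reach the cluster. A minimal sketch using the HBase client API, assuming the HBase client jars are on the classpath; the hostnames are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HbaseClientConf {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Mirror the cluster-side values from hbase-site.xml above; hosts are placeholders.
            conf.set("hbase.zookeeper.quorum", "zk1.example.com,zk2.example.com,zk3.example.com");
            conf.set("hbase.zookeeper.property.clientPort", "2181");
            conf.set("zookeeper.znode.parent", "/hbase-unsecure");
            System.out.println("quorum = " + conf.get("hbase.zookeeper.quorum"));
        }
    }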

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/metainfo.xml
deleted file mode 100644
index 6cffb1f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.6.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HCATALOG/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HCATALOG/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HCATALOG/configuration/global.xml
deleted file mode 100644
index dd89409..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HCATALOG/configuration/global.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description>WebHCat Log Dir.</description>
-  </property>
-  <property>
-    <name>hcat_pid_dir</name>
-    <value>/etc/run/webhcat</value>
-    <description>WebHCat Pid Dir.</description>
-  </property>
-  <property>
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <description>HCat User.</description>
-  </property>
-  <property>
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <description>WebHCat User.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 8e78530..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is a comment for the HCATALOG service</comment>
-    <version>0.11.0.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index 731d984..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,253 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-      The size of this buffer should probably be a multiple of hardware
-      page size (4096 on Intel x86), and it determines how much data is
-      buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used
-      for compression/decompression.</description>
-  </property>
-
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
-
-  <!-- file system properties -->
-
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-      literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-      If zero, the trash feature is disabled.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      The default value is the same as fs.checkpoint.dir.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>67108864</value>
-    <description>The size of the current edit log (in bytes) that triggers
-      a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-    </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-      connections will be inspected for idleness.
-    </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-      connection to the server.
-    </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-      actions, such as kill job, delete file, etc., that should
-      not be exposed to the public. Enable this option if the interfaces
-      are only reachable by those who have the right authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>simple</value>
-    <description>
-      Set the authentication for the cluster. Valid values are: simple or
-      kerberos.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value></value>
-    <description>
-      Enable authorization for different protocols.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-    <description>The mapping from Kerberos principal names to local OS user names.
-      The default rule is just "DEFAULT", which takes all principals in your default domain to their first component:
-      both "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" become "omalley" if your default domain is APACHE.ORG.
-      The translation rules have 3 sections:
-      base     filter    substitution
-      The base consists of a number, representing the number of components in the principal name excluding the realm, and a pattern for building the name from the sections of the principal name. The pattern uses $0 to mean the realm, $1 to mean the first component, and $2 to mean the second component.
-
-      [1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-      [2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-      [2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-      The filter is a regex in parens that must match the generated string for the rule to apply.
-
-      "(.*%admin)" will take any string that ends in "%admin"
-      "(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-      Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-      "s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-      "s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-      "s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-      So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-      RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-      DEFAULT
-
-      To also translate the names with a second component, you'd make the rules:
-
-      RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-      RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-      DEFAULT
-
-      If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-      RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-      DEFAULT
-    </description>
-  </property>
-
-  <!--
-  <property>
-    <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-    <value></value>
-    <description>
-       Proxy group for Hadoop.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-    <value></value>
-    <description>
-       Proxy host for Hadoop.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-    <value></value>
-    <description>
-       Proxy group for Hadoop.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-    <value></value>
-    <description>
-       Proxy host for Hadoop.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-    <value></value>
-    <description>
-      Proxy group for templeton.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-    <value></value>
-    <description>
-      Proxy host for templeton.
-    </description>
-  </property>
-  -->
-</configuration>
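
Two of the properties in the core-site.xml hunk above deserve a worked example for anyone carrying these settings over to the HDP stack. First, hadoop.security.auth_to_local: a rule with base 2 and format $1@$0 rewrites a two-component principal such as nn/host1.example.com@EXAMPLE.COM into "nn@EXAMPLE.COM"; the filter (.*@EXAMPLE.COM) accepts it, and the sed substitution strips the realm, yielding the local user "nn". A sketch with a hypothetical realm (EXAMPLE.COM is not from this patch):

<property>
  <name>hadoop.security.auth_to_local</name>
  <value>
    RULE:[2:$1@$0](.*@EXAMPLE.COM)s/@.*//
    DEFAULT
  </value>
</property>

Second, the commented-out proxyuser block: once the function_hdp_user(...) placeholders are resolved to concrete account names, the properties take the standard hadoop.proxyuser.&lt;user&gt;.groups/.hosts form. A sketch assuming the HCat user resolves to "hcat" (the group and host values are illustrative):

<property>
  <name>hadoop.proxyuser.hcat.groups</name>
  <!-- members of these groups may be impersonated by hcat -->
  <value>users</value>
</property>
<property>
  <name>hadoop.proxyuser.hcat.hosts</name>
  <!-- hosts from which hcat is allowed to impersonate -->
  <value>webhcat-host.example.com</value>
</property>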