Posted to commits@ambari.apache.org by yu...@apache.org on 2013/11/15 20:12:34 UTC

[13/14] AMBARI-3777. Remove HDPLocal stack from stack definition. (yusaku)

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
deleted file mode 100644
index 8034d19..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,195 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention, such as -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
-    <value>3000</value>
-    <description>Maximum number of jobs in the system which can be initialized
-     concurrently by the CapacityScheduler.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
-    <value>-1</value>
-    <description>
-        maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
-        This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
-        The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
-        The default value of -1 implies a queue can use the complete capacity of the cluster.
-
-        This property can be used to prevent jobs which are long running in nature from occupying more than a
-        certain percentage of the cluster, which, in the absence of pre-emption, could affect the capacity
-        guarantees of other queues.
-
-        Note that maximum-capacity is a percentage, so the absolute maximum capacity changes with the
-        cluster's capacity: if nodes or racks are added to the cluster, the maximum capacity in
-        absolute terms increases accordingly.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources 
-    allocated to a user at any given time, if there is competition for them. 
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this 
-    property is 25. If two users have submitted jobs to a queue, no single 
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4 
-    or more users, no user can use more than 25% of the queue's resources. A 
-    value of 100 implies no user limits are imposed. 
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
-    <value>1</value>
-    <description>The multiple of the queue capacity which can be configured to 
-    allow a single user to acquire more slots. 
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
-    <value>200000</value>
-    <description>The maximum number of tasks, across all jobs in the queue, 
-    which can be initialized concurrently. Once the queue's jobs exceed this 
-    limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The maximum number of tasks per user, across all of the
-    user's jobs in the queue, which can be initialized concurrently. Once the
-    user's jobs exceed this limit they will be queued on disk.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The multiple of (maximum-system-jobs * queue-capacity) used to
-    determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The percentage of the resources limited to a particular user
-      for the job queue at any given point of time by default.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
-    <value>1</value>
-    <description>The default multiple of queue-capacity which is used to 
-    determine the amount of slots a single user can consume concurrently.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
-    <value>200000</value>
-    <description>The default maximum number of tasks, across all jobs in the 
-    queue, which can be initialized concurrently. Once the queue's jobs exceed 
-    this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The default maximum number of tasks per user, across all of
-    the user's jobs in the queue, which can be initialized concurrently. Once
-    the user's jobs exceed this limit they will be queued on disk.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The default multiple of (maximum-system-jobs * queue-capacity)
-    used to determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in milliseconds which is used to poll
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads used by the initialization
-    poller to initialize jobs in a set of queues.
-    If this number equals the number of job queues, a single thread
-    initializes the jobs in each queue. If it is smaller, each thread is
-    assigned a set of queues. If it is larger, the number of threads used
-    is capped at the number of job queues.
-    </description>
-  </property>
-
-</configuration>
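
For reference, the per-queue naming convention noted at the top of this file
(mapred.capacity-scheduler.queue.<queue-name>.property-name) means additional
queues are declared simply by repeating the per-queue properties under a new
queue name. A minimal sketch, assuming a hypothetical second queue named
"analytics" and capacities that sum to 100 across queues:

  <property>
    <name>mapred.capacity-scheduler.queue.default.capacity</name>
    <value>70</value>
  </property>
  <property>
    <name>mapred.capacity-scheduler.queue.analytics.capacity</name>
    <value>30</value>
  </property>

The new queue would also have to be listed in mapred.queue.names in
mapred-site.xml for the JobTracker to recognize it.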

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>
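
The deleted mapred-queue-acls.xml opens the default queue to all users via the
"*" wildcard. As a sketch only (hypothetical group name), restricting job
submission to a single group would use the "users groups" value format, where a
leading space indicates that no individual users are listed:

  <property>
    <name>mapred.queue.default.acl-submit-job</name>
    <value> analysts</value>
  </property>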

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index 604adb1..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,531 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.sort.mb</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>No description</description>
-  </property>
-
-<!-- map/reduce properties -->
-
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes, and the recommended default is 5 seconds (a value of
-  5000 here).  In this case, we are using it solely to blast tasks before
-  killing them, and killing them very quickly (1/4 second) to guarantee
-  that we do not leave VMs around for later jobs.
-  </description>
-</property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>50</value>
-    <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <!-- cluster specific -->
-    <name>mapred.local.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
-  </property>
-
-  <property>
-    <name>mapred.reduce.parallel.copies</name>
-    <value>30</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value></value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-  </property>
-
-  <property>
-    <name>mapred.map.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.slowstart.completed.maps</name>
-    <value>0.05</value>
-  </property>
-
-  <property>
-    <name>mapred.inmem.merge.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files
-  for the in-memory merge process. When we accumulate the threshold number of
-  files, we initiate the in-memory merge and spill to disk. A value of 0 or
-  less indicates that we don't want any threshold and instead depend only on
-  the ramfs's memory consumption to trigger the merge.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.map.output.compression.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed?
-    </description>
-  </property>
-
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to be compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
-
-
-  <property>
-    <name>mapred.jobtracker.completeuserjobs.maximum</name>
-    <value>5</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.restart.recover</name>
-    <value>false</value>
-    <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>The percentage of memory- relative to the maximum heap size- to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
-  </property>
-
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 GB.)  If the estimated input size of the reduce is greater than
-  this value, the job is failed. A value of -1 means that there is no limit
-  set. </description>
-</property>
-
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
-
-  <property>
-    <name>mapred.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
-  </property>
-
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value></value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value></value>
-
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.map.memory.mb</name>
-    <value></value>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.reduce.memory.mb</name>
-    <value></value>
-  </property>
-
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    If a node is reported as blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates if persistency of job status information is
-  active or not.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops off the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
-  <description>The directory where the job status information is persisted
-   in a file system to be available after it drops off the memory queue and
-   between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>21600000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value></value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per jvm. If set to -1, there is no limit
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
-      JT user name key.
- </description>
-</property>
-
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       tt user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
-
-
-  <property>
-    <name>hadoop.job.history.user.location</name>
-    <value>none</value>
-    <final>true</final>
-  </property>
-
-
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
-
-</property>
-
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
-    <description>The filename of the keytab for the task tracker</description>
- </property>
-
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The path prefix under which the staging directories are placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
-
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group used by the task controller for access control. The mapred user must be a member, and regular users should *not* be members.</description>
-
- </property>
-
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialization.
-   </description>
-  </property>
-  <property>
-    <name>mapreduce.history.server.embedded</name>
-    <value>false</value>
-    <description>Whether the job history server should be embedded within the
-    JobTracker process.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.history.server.http.address</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>HTTP address of the history server.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.kerberos.principal</name>
-    <!-- cluster variant -->
-  <value></value>
-    <description>Job history user name key. (must map to same user as JT
-user)</description>
-  </property>
-
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.</description>
-</property>
-
-</configuration>
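
Several values in this mapred-site.xml are deliberately left empty in the stack
definition and are filled in per cluster at deployment time. Purely as an
illustration (hypothetical host name and ports), the cluster-variant JobTracker
properties would end up along these lines:

  <property>
    <name>mapred.job.tracker</name>
    <value>jt.example.com:50300</value>
  </property>
  <property>
    <name>mapred.job.tracker.http.address</name>
    <value>jt.example.com:50030</value>
  </property>
  <property>
    <name>mapred.local.dir</name>
    <value>/hadoop/mapred</value>
  </property>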

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 79d219b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.1.2</version>
-
-    <components>
-        <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml
deleted file mode 100644
index bd7de07..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.2.3</version>
-
-    <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 1665ba8..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,245 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->     
-
-<configuration>
-
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-   </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for  Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled any user can manage Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for normal job.
-      -1 means infinite timeout</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
-      Oozie database name.
-     </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
-      Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
-      If set to true, submissions of MapReduce and Pig jobs will include
-      automatically the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
-        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        DEFAULT
-        </value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
-          the Oozie configuration directory; the path can also be absolute (i.e. pointing
-          to Hadoop client conf/ directories in the local filesystem).
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist; if the DB schema exists, it is a NOP.
-            If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>sa</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-                       and Configuration assumes an empty value is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-</configuration>
\ No newline at end of file
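
For illustration, the AUTHORITY=HADOOP_CONF_DIR mapping described for
oozie.service.HadoopAccessorService.hadoop.configurations above can carry
cluster-specific entries alongside the wildcard; a sketch with a hypothetical
NameNode authority:

  <property>
    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
    <value>*=/etc/hadoop/conf,nn.example.com:8020=/etc/hadoop/conf.remote</value>
  </property>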

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml
deleted file mode 100644
index 83ccb06..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml
deleted file mode 100644
index 4982fd2..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.10.1</version>
-
-    <components>
-        <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml
deleted file mode 100644
index ae0e68b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.2</version>
-
-    <components>
-        <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 31d0113..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-      <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value></value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API.</description>
-  </property>
-
-</configuration>
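
templeton.zookeeper.hosts above is left empty in the stack definition and is
normally filled with the cluster's ZooKeeper quorum as comma-separated
host:port pairs; a hypothetical example:

  <property>
    <name>templeton.zookeeper.hosts</name>
    <value>zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181</value>
  </property>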

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index e65992f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for WEBHCAT service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index fc0c3b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
-    <version>3.4.5</version>
-
-    <components>
-        <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/repos/repoinfo.xml
deleted file mode 100644
index e8f1855..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/repos/repoinfo.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.2.1</baseurl>
-      <repoid>HDP-1.2.1</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-</reposinfo>
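The repoinfo.xml removed above maps each supported OS family to the package repository to register before installation. Every <os> block nests a single <repo> with a base URL, repo id, and repo name; for instance, the centos6 entry from the deleted file reduces to:

    <reposinfo>
      <os type="centos6">
        <repo>
          <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.2.1</baseurl>
          <repoid>HDP-1.2.1</repoid>
          <reponame>HDP</reponame>
        </repo>
      </os>
    </reposinfo>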

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 0b21f0f..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.2.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (i.e.
-    clients talking to HRegionServers).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (i.e.
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster).
-    The ACL is a comma-separated list of user and group names. The user and
-    group lists are separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
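All three ACLs in the deleted hbase-policy.xml use the same format: a comma-separated list of users, a blank, then a comma-separated list of groups, with "*" meaning everyone. As a sketch using the example users and groups from the property descriptions themselves, restricting the client protocol would look like:

    <property>
      <name>security.client.protocol.acl</name>
      <value>alice,bob users,wheel</value>
    </property>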

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index 7710cb0..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,345 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value></value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value></value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value></value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value></value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    The same property is used by the Master for the count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value></value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.lease.thread.wakefrequency</name>
-    <value>3000</value>
-    <description>The interval between checks for expired region server leases.
-    This value has been reduced due to the other reduced values above so that
-    the master will notice a dead region server sooner. The default is 15 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value></value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value></value>
-    <description>Block updates if the memstore reaches hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing
-    runaway memstores during spikes in update traffic.  Without an
-    upper bound, the memstore fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or,
-    worse, the process runs out of memory (OOME).
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value></value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value></value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value></value>
-    <description>
-    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value></value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory,
-    and some calls of next may take progressively longer when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value></value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value></value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since a single KeyValue cannot be split, this helps avoid a
-    situation where a region cannot be split any further because its data is
-    too large. It seems wise to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value></value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles files as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value></value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value></value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand per table by setting it on the HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value></value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>dfs.support.append</name>
-    <value></value>
-    <description>Does HDFS allow appends to files?
-    This is an HDFS setting, placed here so the HDFS client enables append support.
-    You must ensure that it is also set to true on the server side when running HBase
-    (you will have to restart your cluster after setting it).
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value></value>
-    <description>Enable/Disable short circuit read for your client.
-    Hadoop servers should be configured to allow short circuit read
-    for the hbase user for this to take effect
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check</description>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.optionalcacheflushinterval</name>
-    <value>10000</value>
-    <description>
-      Amount of time to wait since the last time a region was flushed before
-      invoking an optional cache flush. Default 60,000.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-</configuration>
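Most values in the deleted hbase-site.xml are left empty in the stack definition (presumably to be supplied at cluster configuration time). The property that must always be set is hbase.rootdir, which needs a fully-qualified filesystem URL; using the example namenode host from the property's own description, a filled-in entry would look like:

    <property>
      <name>hbase.rootdir</name>
      <value>hdfs://namenode.example.org:9000/hbase</value>
    </property>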

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/metainfo.xml
deleted file mode 100644
index 553fa2b..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.5</version>
-
-    <components>
-        <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
-        </component>
-
-        <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
-        </component>
-
-        <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/incubator-ambari/blob/d3e1eab5/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 1951a5d..0000000
--- a/ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is the comment for the HCATALOG service</comment>
-    <version>0.5.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>