Posted to commits@ambari.apache.org by jl...@apache.org on 2014/12/31 06:44:40 UTC

[1/3] ambari git commit: AMBARI-8876: Common Services: Refactor HDPWIN 2.1 stack to use common services (Jayush Luniya)

Repository: ambari
Updated Branches:
  refs/heads/trunk 856790517 -> af6f6e877


http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-log4j.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-log4j.xml
deleted file mode 100644
index cbdd452..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-log4j.xml
+++ /dev/null
@@ -1,61 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <value>
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# ***** Set the org.apache.pig logger level to info and its only appender to A.
-log4j.logger.org.apache.pig=info, A
-
-# ***** A is set to be a ConsoleAppender.
-log4j.appender.A=org.apache.log4j.ConsoleAppender
-# ***** A uses PatternLayout.
-log4j.appender.A.layout=org.apache.log4j.PatternLayout
-log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-properties.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-properties.xml
deleted file mode 100644
index 88e2fea..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/configuration/pig-properties.xml
+++ /dev/null
@@ -1,262 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>pig-content</name>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# Pig configuration file. All values can be overwritten by command line arguments.
-
-# Use the "-h properties" command to see description of the properties
-
-# log4jconf log4j configuration file
-# log4jconf=./conf/log4j.properties
-
-# a file that contains pig script
-#file=
-
-# load jarfile, colon separated
-#jar=
-
-#verbose print all log messages to screen (default is to print only INFO and above to screen)
-#verbose=true
-
-#exectype local|mapreduce, mapreduce is default
-#exectype=local
-
-#the default timezone: if it is not set, the default timezone for this host is used.
-#the correct timezone format is the UTC offset: e.g., +08:00.
-#pig.datetime.default.tz=
-
-#pig.logfile=
-
-#Do not spill temp files smaller than this size (bytes)
-#pig.spill.size.threshold=5000000
-
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-#pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-#pig.exec.reducers.bytes.per.reducer=1000000000
-#pig.exec.reducers.max=999
-
-#Logging properties
-#verbose=false
-#brief=false
-#debug=INFO
-#aggregate.warning=true
-
-#Performance tuning properties
-#pig.cachedbag.memusage=0.2
-#pig.skewedjoin.reduce.memusage=0.3
-#pig.exec.nocombiner=false
-#opt.multiquery=true
-
-#Following parameters are for configuring intermediate storage format
-#Supported storage types are seqfile and tfile
-#Supported codec types: tfile supports gz(gzip) and lzo, seqfile supports gz(gzip), lzo, snappy, bzip2
-#pig.tmpfilecompression=false
-#pig.tmpfilecompression.storage=seqfile
-#pig.tmpfilecompression.codec=gz
-
-#pig.noSplitCombination=true
-
-#pig.exec.mapPartAgg=false
-#pig.exec.mapPartAgg.minReduction=10
-
-#exectype=mapreduce
-#pig.additional.jars=&lt;comma separated list of jars&gt;
-#udf.import.list=&lt;comma separated list of imports&gt;
-#stop.on.failure=false
-
-#Use this option only when your Pig job will otherwise die because of
-#using more counters than the hadoop configured limit
-#pig.disable.counter=true
-
-# By default, pig will allow 1GB of data to be replicated using
-# the distributed cache when doing fragment-replicated join.
-# pig.join.replicated.max.bytes=1000000000
-
-# Use this option to turn on UDF timers. This will cause two
-# counters to be tracked for every UDF and LoadFunc in your script:
-# approx_microsecs measures approximate time spent inside a UDF
-# approx_invocations reports the approximate number of times the UDF was invoked
-# pig.udf.profile=false
-
-#When enabled, 'describe' prints a multi-line formatted schema
-#(similar to an indented json) rather than on a single line.
-#pig.pretty.print.schema=true
-
-#pig.sql.type=hcat
-hcat.bin=c:\hdp\hcatalog-@hcat.version@\\bin\\hcat.py
-
-############################ SchemaTuple ############################
-
-# Setting this value will turn on the SchemaTuple feature (PIG-2632)
-# This will attempt to use code generation for more efficient tuple processing
-# within the pig code. This can lead to CPU, serialization, and memory
-# benefits (currently, the potential memory benefits are the largest).
-
-# This parameter will enable the optimization in all available cases
-#pig.schematuple=true
-
-# Certain cases can be turned off by uncommenting the following. These will
-# all be off by default, but will all be turned on if pig.schematuple is set
-# to true.
-
-# This will disable SchemaTuples in the case of udfs. Currently,
-# the input to UDF's will be SchemaTuples.
-
-#pig.schematuple.udf=false
-
-# This is currently not implemented. In the future, LoadFuncs with known
-# schemas should output SchemaTuples
-
-#pig.schematuple.load=false
-
-# This will use SchemaTuples in replicated joins. The potential memory saving
-# here is significant. It will use SchemaTuples when it builds the HashMap of
-# the join key to related values.
-
-#pig.schematuple.fr_join=false
-
-# In the current implementation of merge join, all of the Tuples in the left relation
-# that share a given key will be stored in a List in memory. This will use SchemaTuples
-# instead in that List.
-
-#pig.schematuple.merge_join=false
-
-#####################################################################
-
-##### Set up optional Pig Progress Notification Listener ############
-
-# Note that only one PPNL can be set up. If you need several, write a PPNL that will chain them.
-# pig.notification.listener = &lt;fully qualified class name of a PPNL implementation&gt;
-
-# Optionally, you can supply a single String argument to pass to your PPNL.
-# pig.notification.listener.arg = &lt;somevalue&gt;
-
-#####################################################################
-
-########## Override the default Reducer Estimator logic #############
-
-# By default, the logic to estimate the number of reducers to use for a given job lives in:
-#   org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator
-# This logic can be replaced by implementing the following interface:
-#   org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigReducerEstimator
-
-# This class will be invoked to estimate the number of reducers to use.
-# pig.exec.reducer.estimator = &lt;fully qualified class name of a PigReducerEstimator implementation&gt;
-
-# Optionally, you can supply a single String argument to pass to your PigReducerEstimator.
-# pig.exec.reducer.estimator.arg = &lt;somevalue&gt;
-
-#####################################################################
-
-###### Override the default Pig Stats Output Size Reader logic ######
-
-# By default, the size of reducers output is computed as the total size of
-# output files. But since not every storage is file-based, this logic is not
-# always applicable. If that is the case, the logic can be replaced by
-# implementing the following interface:
-#   org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigStatsOutputSizeReader
-
-# This class will be invoked to compute the size of reducers output.
-# pig.stats.output.size.reader = &lt;fully qualified class name of a PigStatsOutputSizeReader implementation&gt;
-
-# If you need to register more than one reader, you can register them as a comma
-# separated list. Every reader implements a boolean supports(POStore sto) method.
-# When there is more than one reader, they are consulted in order, and the
-# first one whose supports() method returns true will be used.
-#
-#####################################################################
-
-#pig.load.default.statements=
-
-#####################################################################
-
-########### Override hadoop configs programmatically #################
-
-# By default, Pig expects hadoop configs (hadoop-site.xml and core-site.xml)
-# to be present on the classpath. There are cases when these configs
-# need to be passed programmatically, such as while using the PigServer API.
-# In such cases, you can override hadoop configs by setting the property
-# "pig.use.overriden.hadoop.configs".
-#
-# When this property is set to true, Pig skips looking for hadoop configs
-# on the classpath and instead picks them up from the Properties/Configuration
-# object passed to it.
-
-# pig.use.overriden.hadoop.configs=false
-#
-######################################################################
-
-# Check if the script needs to check multiple stores writing
-# to the same location. When set to true, stops the execution
-# of the script right away.
-pig.location.check.strict=false
-
-######################################################################
-
-# This key is used to define the default load func. Pig will fallback
-# on PigStorage as default in case this is undefined.
-
-# pig.default.load.func=&lt;fully qualified class name of a LoadFunc implementation&gt;
-# For eg, pig.default.load.func=org.apache.pig.custom.MyCustomStorage
-
-# This key is used to define the default store func. Pig will fallback
-# on PigStorage as default in case this is undefined.
-
-# pig.default.store.func=&lt;fully qualified class name of a StoreFunc implementation&gt;
-# For eg, pig.default.store.func=org.apache.pig.custom.MyCustomStorage
-
-# This option is used to define whether to support recovery to handle the
-# application master getting restarted.
-# pig.output.committer.recovery.support=true
-
-# Set this option to true if you need to use the old partition filter optimizer.
-# Note: Old filter optimizer PColFilterOptimizer will be deprecated in the future.
-# pig.exec.useOldPartitionFilterOptimizer=true
-
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/metainfo.xml
index 68f7566..03de475 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/PIG/metainfo.xml
@@ -20,56 +20,21 @@
   <services>
     <service>
       <name>PIG</name>
-      <displayName>Pig</displayName>
-      <comment>Scripting platform for analyzing large datasets</comment>
-      <version>0.12.0.2.0</version>
+      <extends>common-services/PIG/0.12.0.2.0</extends>
+      <version>0.12.1.2.1.1.0</version>
       <components>
         <component>
           <name>PIG</name>
-          <displayName>Pig</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/pig_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
           <configFiles>
             <configFile>
               <type>env</type>
               <fileName>pig-env.cmd</fileName>
               <dictionaryName>pig-env</dictionaryName>
             </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>pig-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>pig.properties</fileName>
-              <dictionaryName>pig-properties</dictionaryName>
-            </configFile>
           </configFiles>
         </component>
       </components>
 
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>pig-env</config-type>
-        <config-type>pig-log4j</config-type>
-        <config-type>pig-properties</config-type>
-      </configuration-dependencies>
-
     </service>
   </services>
 </metainfo>
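
The pattern above is the heart of this refactoring: the HDPWIN service definition now inherits everything from the common-services definition and redeclares only what differs on Windows. A minimal sketch of such a child metainfo.xml (illustrative; the inherited fields, such as displayName, comment, command scripts, required services, and configuration dependencies, come from common-services/PIG/0.12.0.2.0/metainfo.xml):

  <metainfo>
    <services>
      <service>
        <name>PIG</name>
        <!-- inherit the full service definition from common-services -->
        <extends>common-services/PIG/0.12.0.2.0</extends>
        <!-- stack-specific override: the Pig version shipped with HDPWIN 2.1 -->
        <version>0.12.1.2.1.1.0</version>
      </service>
    </services>
  </metainfo>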

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
index c3bcf7e..5fa7bcb 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/configuration/sqoop-env.xml
@@ -21,11 +21,17 @@
 -->
 
 <configuration>
-  <!-- sqoop-env.sh -->
+  <property>
+    <name>sqoop_user</name>
+    <deleted>true</deleted>
+  </property>
+
+  <!-- sqoop-env.cmd -->
   <property>
     <name>content</name>
     <description>This is the jinja template for sqoop-env.cmd file</description>
-    <value>@echo off
+    <value>
+@echo off
 :: Licensed to the Apache Software Foundation (ASF) under one or more
 :: contributor license agreements.  See the NOTICE file distributed with
 :: this work for additional information regarding copyright ownership.
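
The sqoop_user block above illustrates the second mechanism this refactoring relies on: a child stack can mask a configuration property it inherits from common-services by redeclaring its name with a deleted flag, or replace its value by redeclaring the name with a new value. A rough sketch, using one real name from this diff and one hypothetical one:

  <configuration>
    <!-- mask the inherited property entirely -->
    <property>
      <name>sqoop_user</name>
      <deleted>true</deleted>
    </property>
    <!-- or redeclare it to override just the value (hypothetical property name) -->
    <property>
      <name>some.inherited.property</name>
      <value>windows-specific-value</value>
    </property>
  </configuration>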

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/metainfo.xml
index 13f9630..678a8a7 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/SQOOP/metainfo.xml
@@ -20,38 +20,11 @@
   <services>
     <service>
       <name>SQOOP</name>
-      <displayName>Sqoop</displayName>
-      <comment>Tool for transferring bulk data between Apache Hadoop and
-        structured data stores such as relational databases
-      </comment>
-      <version>1.4.4.2.0</version>
-
+      <extends>common-services/SQOOP/1.4.4.2.0</extends>
+      <version>1.4.4.2.1.1.0</version>
       <components>
         <component>
           <name>SQOOP</name>
-          <displayName>Sqoop</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/sqoop_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
           <configFiles>
             <configFile>
               <type>env</type>
@@ -61,20 +34,6 @@
           </configFiles>
         </component>
       </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>HDFS</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>sqoop-env</config-type>
-      </configuration-dependencies>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
new file mode 100644
index 0000000..4d9aae6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-env.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>storm_user</name>
+    <deleted>true</deleted>
+  </property>
+
+  <property>
+    <name>storm_log_dir</name>
+    <value>c:\hadoop\logs\storm</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm_pid_dir</name>
+    <value>c:\hadoop\run\storm</value>
+    <description></description>
+  </property>
+
+  <!-- storm-env.cmd -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for storm-env.cmd file</description>
+    <value>
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml
index 5a23314..c0143f1 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/configuration/storm-site.xml
@@ -22,627 +22,15 @@
 
 <configuration supports_final="true">
   <property>
-    <name>storm.zookeeper.servers</name>
-    <value>['localhost']</value>
-    <description>A list of hosts of ZooKeeper servers used to manage the cluster.</description>
-  </property>
-  <property>
-    <name>nimbus.host</name>
-    <value>localhost</value>
-    <description>The host that the master server is running on.</description>
-  </property>
-  <property>
     <name>storm.local.dir</name>
-    <value>c:\\hdpdata</value>
+    <value>c:\hadoop\storm</value>
     <description>A directory on the local filesystem used by Storm for any local
        filesystem usage it needs. The directory must exist and the Storm daemons must
        have permission to read/write from this location.</description>
   </property>
   <property>
-    <name>logviewer.port</name>
-    <value>8081</value>
-    <description>HTTP UI port for log viewer.</description>
-  </property>
-  <property>
-    <name>storm.messaging.transport</name>
-    <value>backtype.storm.messaging.netty.Context</value>
-    <description>The transporter for communication among Storm tasks.</description>
-  </property>
-  <property>
-    <name>storm.messaging.netty.buffer_size</name>
-    <value>16384</value>
-    <description>Netty based messaging: The buffer size for send/recv buffer.</description>
-  </property>
-  <property>
-    <name>storm.messaging.netty.max_retries</name>
-    <value>10</value>
-    <description>Netty based messaging: The max # of retries that a peer will perform when a remote is not accessible.</description>
-  </property>
-  <property>
-    <name>storm.messaging.netty.max_wait_ms</name>
-    <value>5000</value>
-    <description>Netty based messaging: The max # of milliseconds that a peer will wait.</description>
-  </property>
-  <property>
-    <name>storm.messaging.netty.min_wait_ms</name>
-    <value>1000</value>
-    <description>Netty based messaging: The min # of milliseconds that a peer will wait.</description>
-  </property>
-  <property>
-    <name>ui.port</name>
-    <value>8772</value>
-    <description>Storm UI binds to this port.</description>
-  </property>
-  <property>
-    <name>java.library.path</name>
-    <value>/usr/local/lib:/opt/local/lib:/usr/lib</value>
-    <description>This value is passed to spawned JVMs (e.g., Nimbus, Supervisor, and Workers)
-      for the java.library.path value. java.library.path tells the JVM where
-      to look for native libraries. It is necessary to set this config correctly since
-      Storm uses the ZeroMQ and JZMQ native libs.
-    </description>
-  </property>
-  <property>
-    <name>storm.zookeeper.servers</name>
-    <value>['localhost']</value>
-    <description>A list of hosts of ZooKeeper servers used to manage the cluster.</description>
-  </property>
-  <property>
-    <name>storm.zookeeper.port</name>
-    <value>2181</value>
-    <description>The port Storm will use to connect to each of the ZooKeeper servers.</description>
-  </property>
-  <property>
-    <name>storm.zookeeper.root</name>
-    <value>/storm</value>
-    <description>The root location at which Storm stores data in ZooKeeper.</description>
-  </property>
-  <property>
-    <name>storm.zookeeper.session.timeout</name>
-    <value>20000</value>
-    <description>The session timeout for clients to ZooKeeper.</description>
-  </property>
-  <property>
-    <name>storm.zookeeper.connection.timeout</name>
-    <value>15000</value>
-    <description>The connection timeout for clients to ZooKeeper.</description>
-  </property>
-  <property>
-    <name>storm.zookeeper.retry.times</name>
-    <value>5</value>
-    <description>The number of times to retry a Zookeeper operation.</description>
-  </property>
-  <property>
-    <name>storm.zookeeper.retry.interval</name>
-    <value>1000</value>
-    <description>The interval between retries of a Zookeeper operation.</description>
-  </property>
-  <property>
-    <name>storm.zookeeper.retry.intervalceiling.millis</name>
-    <value>30000</value>
-    <description>The ceiling of the interval between retries of a Zookeeper operation.</description>
-  </property>
-  <property>
-    <name>storm.cluster.mode</name>
-    <value>distributed</value>
-    <description>The mode this Storm cluster is running in. Either "distributed" or "local".</description>
-  </property>
-  <property>
-    <name>storm.local.mode.zmq</name>
-    <value>false</value>
-    <description>Whether or not to use ZeroMQ for messaging in local mode. If this is set
-      to false, then Storm will use a pure-Java messaging system. The purpose
-      of this flag is to make it easy to run Storm in local mode by eliminating
-      the need for native dependencies, which can be difficult to install.
-    </description>
-  </property>
-  <property>
-    <name>storm.thrift.transport</name>
-    <value>backtype.storm.security.auth.SimpleTransportPlugin</value>
-    <description>The transport plug-in for Thrift client/server communication.</description>
-  </property>
-  <property>
-    <name>storm.messaging.transport</name>
-    <value>backtype.storm.messaging.netty.Context</value>
-    <description>The transporter for communication among Storm tasks.</description>
-  </property>
-  <property>
-    <name>nimbus.host</name>
-    <value>localhost</value>
-    <description>The host that the master server is running on.</description>
-  </property>
-  <property>
-    <name>nimbus.thrift.port</name>
-    <value>6627</value>
-    <description>Which port the Thrift interface of Nimbus should run on. Clients should
-      connect to this port to upload jars and submit topologies.
-    </description>
-  </property>
-  <property>
-    <name>nimbus.thrift.max_buffer_size</name>
-    <value>1048576</value>
-    <description>The maximum buffer size thrift should use when reading messages.</description>
-  </property>
-  <property>
-    <name>nimbus.childopts</name>
-    <value>-Xmx1024m</value>
-    <description>This parameter is used by the storm-deploy project to configure the jvm options for the nimbus
-      daemon.
-    </description>
-  </property>
-  <property>
-    <name>nimbus.task.timeout.secs</name>
-    <value>30</value>
-    <description>How long a task can go without heartbeating before nimbus considers it dead and reassigns it
-      to another location.
-    </description>
-  </property>
-  <property>
-    <name>nimbus.supervisor.timeout.secs</name>
-    <value>60</value>
-    <description>How long a supervisor can go without heartbeating before nimbus considers it dead and stops
-      assigning new work to it.
-    </description>
-  </property>
-  <property>
-    <name>nimbus.monitor.freq.secs</name>
-    <value>10</value>
-    <description>
-      How often nimbus should wake up to check heartbeats and do reassignments. Note
-      that if a machine ever goes down Nimbus will immediately wake up and take action.
-      This parameter is for checking for failures when there's no explicit event like that occurring.
-    </description>
-  </property>
-  <property>
-    <name>nimbus.cleanup.inbox.freq.secs</name>
-    <value>600</value>
-    <description>How often nimbus should wake the cleanup thread to clean the inbox.</description>
-  </property>
-  <property>
-    <name>nimbus.inbox.jar.expiration.secs</name>
-    <value>3600</value>
-    <description>
-      The length of time a jar file lives in the inbox before being deleted by the cleanup thread.
-
-      Probably keep this value greater than or equal to NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS.
-      Note that the time it takes to delete an inbox jar file is going to be somewhat more than
-      NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS (depending on what NIMBUS_CLEANUP_FREQ_SECS is set to).
-    </description>
-  </property>
-  <property>
-    <name>nimbus.task.launch.secs</name>
-    <value>120</value>
-    <description>A special timeout used when a task is initially launched. During launch, this is the timeout
-      used until the first heartbeat, overriding nimbus.task.timeout.secs.
-    </description>
-  </property>
-  <property>
-    <name>nimbus.reassign</name>
-    <value>true</value>
-    <description>Whether or not nimbus should reassign tasks if it detects that a task goes down.
-      Defaults to true, and it's not recommended to change this value.
-    </description>
-  </property>
-  <property>
-    <name>nimbus.file.copy.expiration.secs</name>
-    <value>600</value>
-    <description>During upload/download with the master, how long an upload or download connection is idle
-      before nimbus considers it dead and drops the connection.
-    </description>
-  </property>
-  <property>
-    <name>nimbus.topology.validator</name>
-    <value>backtype.storm.nimbus.DefaultTopologyValidator</value>
-    <description>A custom class that implements ITopologyValidator that is run whenever a
-      topology is submitted. Can be used to provide business-specific logic for
-      whether topologies are allowed to run or not.
-    </description>
-  </property>
-  <property>
-    <name>ui.childopts</name>
-    <value>-Xmx768m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf</value>
-    <description>Childopts for Storm UI Java process.</description>
-  </property>
-  <property>
-    <name>logviewer.childopts</name>
-    <value>-Xmx128m</value>
-    <description>Childopts for log viewer java process.</description>
-  </property>
-  <property>
-    <name>logviewer.appender.name</name>
-    <value>A1</value>
-    <description>Appender name used by log viewer to determine log directory.</description>
-  </property>
-  <property>
-    <name>drpc.port</name>
-    <value>3772</value>
-    <description>This port is used by Storm DRPC for receiving DRPC requests from clients.</description>
-  </property>
-  <property>
-    <name>drpc.worker.threads</name>
-    <value>64</value>
-    <description>DRPC thrift server worker threads.</description>
-  </property>
-  <property>
-    <name>drpc.queue.size</name>
-    <value>128</value>
-    <description>DRPC thrift server queue size.</description>
-  </property>
-  <property>
-    <name>drpc.invocations.port</name>
-    <value>3773</value>
-    <description>This port on Storm DRPC is used by DRPC topologies to receive function invocations and send results
-      back.
-    </description>
-  </property>
-  <property>
-    <name>drpc.request.timeout.secs</name>
-    <value>600</value>
-    <description>The timeout on DRPC requests within the DRPC server. Defaults to 10 minutes. Note that requests
-      can also time out based on the socket timeout on the DRPC client, and separately based on the topology message
-      timeout for the topology implementing the DRPC function.
-    </description>
-  </property>
-  <property>
-    <name>drpc.childopts</name>
-    <value>-Xmx768m</value>
-    <description>Childopts for Storm DRPC Java process.</description>
-  </property>
-  <property>
-    <name>transactional.zookeeper.root</name>
-    <value>/transactional</value>
-    <description>The root directory in ZooKeeper for metadata about TransactionalSpouts.</description>
-  </property>
-  <property>
-    <name>transactional.zookeeper.servers</name>
-    <value>null</value>
-    <description>The list of zookeeper servers in which to keep the transactional state. If null (which is default),
-      will use storm.zookeeper.servers
-    </description>
-  </property>
-  <property>
-    <name>transactional.zookeeper.port</name>
-    <value>null</value>
-    <description>The port to use to connect to the transactional zookeeper servers. If null (which is default),
-      will use storm.zookeeper.port
-    </description>
-  </property>
-  <property>
-    <name>supervisor.slots.ports</name>
-    <value>[6700, 6701]</value>
-    <description>A list of ports that can run workers on this supervisor. Each worker uses one port, and
-      the supervisor will only run one worker per port. Use this configuration to tune
-      how many workers run on each machine.
-    </description>
-  </property>
-  <property>
-    <name>supervisor.childopts</name>
-    <value>-Xmx256m</value>
-    <description>This parameter is used by the storm-deploy project to configure the jvm options for the supervisor
-      daemon.
-    </description>
-  </property>
-  <property>
-    <name>supervisor.worker.start.timeout.secs</name>
-    <value>120</value>
-    <description>How long a worker can go without heartbeating during the initial launch before
-      the supervisor tries to restart the worker process. This value overrides
-      supervisor.worker.timeout.secs during launch because there is additional
-      overhead to starting and configuring the JVM on launch.
-    </description>
-  </property>
-  <property>
-    <name>supervisor.worker.timeout.secs</name>
-    <value>30</value>
-    <description>How long a worker can go without heartbeating before the supervisor tries to restart the worker
-      process.
-    </description>
-  </property>
-  <property>
-    <name>supervisor.monitor.frequency.secs</name>
-    <value>3</value>
-    <description>How often the supervisor checks the worker heartbeats to see if any of them need to be restarted.
-    </description>
-  </property>
-  <property>
-    <name>supervisor.heartbeat.frequency.secs</name>
-    <value>5</value>
-    <description>How often the supervisor sends a heartbeat to the master.</description>
-  </property>
-  <property>
-    <name>worker.childopts</name>
-    <value>-Xmx768m</value>
-    <description>The jvm opts provided to workers launched by this supervisor. All "%ID%" substrings are replaced with
-      an identifier for this worker.
-    </description>
-  </property>
-  <property>
-    <name>worker.heartbeat.frequency.secs</name>
-    <value>1</value>
-    <description>How often this worker should heartbeat to the supervisor.</description>
-  </property>
-  <property>
-    <name>task.heartbeat.frequency.secs</name>
-    <value>3</value>
-    <description>How often a task should heartbeat its status to the master.</description>
-  </property>
-  <property>
-    <name>task.refresh.poll.secs</name>
-    <value>10</value>
-    <description>How often a task should sync its connections with other tasks (if a task is
-      reassigned, the other tasks sending messages to it need to refresh their connections).
-      In general though, when a reassignment happens other tasks will be notified
-      almost immediately. This configuration is here just in case that notification doesn't
-      come through.
-    </description>
-  </property>
-  <property>
-    <name>zmq.threads</name>
-    <value>1</value>
-    <description>The number of threads that should be used by the zeromq context in each worker process.</description>
-  </property>
-  <property>
-    <name>zmq.linger.millis</name>
-    <value>5000</value>
-    <description>How long a connection should retry sending messages to a target host when
-      the connection is closed. This is an advanced configuration and can almost
-      certainly be ignored.
-    </description>
-  </property>
-  <property>
-    <name>zmq.hwm</name>
-    <value>0</value>
-    <description>The high water mark for the ZeroMQ push sockets used for networking. Use this config to prevent
-      buffer explosion on the networking layer.
-    </description>
-  </property>
-  <property>
-    <name>storm.messaging.netty.server_worker_threads</name>
-    <value>1</value>
-    <description>Netty based messaging: The # of worker threads for the server.</description>
-  </property>
-  <property>
-    <name>storm.messaging.netty.client_worker_threads</name>
-    <value>1</value>
-    <description>Netty based messaging: The # of worker threads for the client.</description>
-  </property>
-  <property>
-    <name>topology.enable.message.timeouts</name>
-    <value>true</value>
-    <description>True if Storm should time out messages. Defaults to true. This is meant to be used
-      in unit tests to prevent tuples from being accidentally timed out during the test.
-    </description>
-  </property>
-  <property>
-    <name>topology.debug</name>
-    <value>false</value>
-    <description>When set to true, Storm will log every message that's emitted.</description>
-  </property>
-  <property>
-    <name>topology.optimize</name>
-    <value>true</value>
-    <description>Whether or not the master should optimize topologies by running multiple tasks in a single thread where
-      appropriate.
-    </description>
-  </property>
-  <property>
-    <name>topology.workers</name>
-    <value>1</value>
-    <description>How many processes should be spawned around the cluster to execute this
-      topology. Each process will execute some number of tasks as threads within
-      them. This parameter should be used in conjunction with the parallelism hints
-      on each component in the topology to tune the performance of a topology.
-    </description>
-  </property>
-  <property>
-    <name>topology.acker.executors</name>
-    <value>null</value>
-    <description>How many executors to spawn for ackers.
-
-      If this is set to 0, then Storm will immediately ack tuples as soon
-      as they come off the spout, effectively disabling reliability.
-    </description>
-  </property>
-  <property>
-    <name>topology.message.timeout.secs</name>
-    <value>30</value>
-    <description>The maximum amount of time given to the topology to fully process a message
-      emitted by a spout. If the message is not acked within this time frame, Storm
-      will fail the message on the spout. Some spout implementations will then replay
-      the message at a later time.
-    </description>
-  </property>
-  <property>
-    <name>topology.skip.missing.kryo.registrations</name>
-    <value>false</value>
-    <description>Whether or not Storm should skip the loading of kryo registrations for which it
-      does not know the class or have the serializer implementation. Otherwise, the task will
-      fail to load and will throw an error at runtime. The use case of this is if you want to
-      declare your serializations on the storm.yaml files on the cluster rather than every single
-      time you submit a topology. Different applications may use different serializations and so
-      a single application may not have the code for the other serializers used by other apps.
-      By setting this config to true, Storm will ignore that it doesn't have those other serializations
-      rather than throw an error.
-    </description>
-  </property>
-  <property>
-    <name>topology.max.task.parallelism</name>
-    <value>null</value>
-    <description>The maximum parallelism allowed for a component in this topology. This configuration is
-      typically used in testing to limit the number of threads spawned in local mode.
-    </description>
-  </property>
-  <property>
-    <name>topology.max.spout.pending</name>
-    <value>null</value>
-    <description>The maximum number of tuples that can be pending on a spout task at any given time.
-      This config applies to individual tasks, not to spouts or topologies as a whole.
-
-      A pending tuple is one that has been emitted from a spout but has not been acked or failed yet.
-      Note that this config parameter has no effect for unreliable spouts that don't tag
-      their tuples with a message id.
-    </description>
-  </property>
-  <property>
-    <name>topology.state.synchronization.timeout.secs</name>
-    <value>60</value>
-    <description>The maximum amount of time a component gives a source of state to synchronize before it requests
-      synchronization again.
-    </description>
-  </property>
-  <property>
-    <name>topology.stats.sample.rate</name>
-    <value>0.05</value>
-    <description>The percentage of tuples to sample to produce stats for a task.</description>
-  </property>
-  <property>
-    <name>topology.builtin.metrics.bucket.size.secs</name>
-    <value>60</value>
-    <description>The time period that builtin metrics data is bucketed into.</description>
-  </property>
-  <property>
-    <name>topology.fall.back.on.java.serialization</name>
-    <value>true</value>
-    <description>Whether or not to use Java serialization in a topology.</description>
-  </property>
-  <property>
-    <name>topology.worker.childopts</name>
-    <value>null</value>
-    <description>Topology-specific options for the worker child process. This is used in addition to WORKER_CHILDOPTS.
-    </description>
-  </property>
-  <property>
-    <name>topology.executor.receive.buffer.size</name>
-    <value>1024</value>
-    <description>The size of the Disruptor receive queue for each executor. Must be a power of 2.</description>
-  </property>
-  <property>
-    <name>topology.executor.send.buffer.size</name>
-    <value>1024</value>
-    <description>The size of the Disruptor send queue for each executor. Must be a power of 2.</description>
-  </property>
-  <property>
-    <name>topology.receiver.buffer.size</name>
-    <value>8</value>
-    <description>The maximum number of messages to batch from the thread receiving off the network to the
-      executor queues. Must be a power of 2.
-    </description>
-  </property>
-  <property>
-    <name>topology.transfer.buffer.size</name>
-    <value>1024</value>
-    <description>The size of the Disruptor transfer queue for each worker.</description>
-  </property>
-  <property>
-    <name>topology.tick.tuple.freq.secs</name>
-    <value>null</value>
-    <description>How often a tick tuple from the "__system" component and "__tick" stream should be sent
-      to tasks. Meant to be used as a component-specific configuration.
-    </description>
-  </property>
-  <property>
-    <name>topology.worker.shared.thread.pool.size</name>
-    <value>4</value>
-    <description>The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
-      via the TopologyContext.
-    </description>
-  </property>
-  <property>
-    <name>topology.disruptor.wait.strategy</name>
-    <value>com.lmax.disruptor.BlockingWaitStrategy</value>
-    <description>Configure the wait strategy used for internal queuing. Can be used to trade off latency
-      vs. throughput.
-    </description>
-  </property>
-  <property>
-    <name>topology.executor.send.buffer.size</name>
-    <value>1024</value>
-    <description>The size of the Disruptor send queue for each executor. Must be a power of 2.</description>
-  </property>
-  <property>
-    <name>topology.receiver.buffer.size</name>
-    <value>8</value>
-    <description>The maximum number of messages to batch from the thread receiving off the network to the
-      executor queues. Must be a power of 2.
-    </description>
-  </property>
-  <property>
-    <name>topology.transfer.buffer.size</name>
-    <value>1024</value>
-    <description>The size of the Disruptor transfer queue for each worker.</description>
-  </property>
-  <property>
-    <name>topology.tick.tuple.freq.secs</name>
-    <value>null</value>
-    <description>How often a tick tuple from the "__system" component and "__tick" stream should be sent
-      to tasks. Meant to be used as a component-specific configuration.
-    </description>
-  </property>
-  <property>
-    <name>topology.worker.shared.thread.pool.size</name>
-    <value>4</value>
-    <description>The size of the shared thread pool for worker tasks to make use of. The thread pool can be accessed
-      via the TopologyContext.
-    </description>
-  </property>
-  <property>
-    <name>topology.spout.wait.strategy</name>
-    <value>backtype.storm.spout.SleepSpoutWaitStrategy</value>
-    <description>A class that implements a strategy for what to do when a spout needs to wait. Waiting is
-      triggered in one of two conditions:
-
-      1. nextTuple emits no tuples
-      2. The spout has hit maxSpoutPending and can't emit any more tuples
-    </description>
-  </property>
-  <property>
-    <name>topology.sleep.spout.wait.strategy.time.ms</name>
-    <value>1</value>
-    <description>The amount of milliseconds the SleepEmptyEmitStrategy should sleep for.</description>
-  </property>
-  <property>
-    <name>topology.error.throttle.interval.secs</name>
-    <value>10</value>
-    <description>The interval in seconds to use for determining whether to throttle errors reported to Zookeeper.
-      For example,
-      an interval of 10 seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be
-      reported to Zookeeper per task for every 10 second interval of time.
-    </description>
-  </property>
-  <property>
-    <name>topology.max.error.report.per.interval</name>
-    <value>5</value>
-    <description>The interval in seconds to use for determining whether to throttle errors reported to Zookeeper.
-      For example,
-      an interval of 10 seconds with topology.max.error.report.per.interval set to 5 will only allow 5 errors to be
-      reported to Zookeeper per task for every 10 second interval of time.
-    </description>
-  </property>
-  <property>
-    <name>topology.kryo.factory</name>
-    <value>backtype.storm.serialization.DefaultKryoFactory</value>
-    <description>Class that specifies how to create a Kryo instance for serialization. Storm will then apply
-      topology.kryo.register and topology.kryo.decorators on top of this. The default implementation
-      implements topology.fall.back.on.java.serialization and turns references off.
-    </description>
-  </property>
-  <property>
-    <name>topology.tuple.serializer</name>
-    <value>backtype.storm.serialization.types.ListDelegateSerializer</value>
-    <description>The serializer class for ListDelegate (tuple payload).
-      The default serializer will be ListDelegateSerializer
-    </description>
-  </property>
-  <property>
-    <name>topology.trident.batch.emit.interval.millis</name>
-    <value>500</value>
-    <description>How often a batch can be emitted in a Trident topology.</description>
-  </property>
-  <property>
     <name>dev.zookeeper.path</name>
-    <value>/tmp/dev-storm-zookeeper</value>
+    <value>c:\hadoop\temp\dev-storm-zookeeper</value>
     <description>The path to use as the zookeeper dir when running a zookeeper server via
       "storm dev-zookeeper". This zookeeper instance is only intended for development;
       it is not a production grade zookeeper setup.
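
After this change the stack-level storm-site.xml carries only the Windows-specific values; every other property comes from the common-services STORM definition. When Ambari merges the two, a child property with the same name wins, so an override only needs the name and the new value. A rough sketch (the inherited Linux-side value shown is illustrative, not quoted from the parent file):

  <!-- inherited from common-services/STORM/0.9.1.2.1 -->
  <property>
    <name>storm.local.dir</name>
    <value>/hadoop/storm</value>
  </property>

  <!-- HDPWIN 2.1 override: same name, Windows path wins on merge -->
  <property>
    <name>storm.local.dir</name>
    <value>c:\hadoop\storm</value>
  </property>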

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metainfo.xml
index 9df2aa8..76022cc 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/STORM/metainfo.xml
@@ -21,72 +21,18 @@
   <services>
     <service>
       <name>STORM</name>
-      <displayName>Storm</displayName>
-      <comment>Apache Hadoop Stream processing framework</comment>
-      <version>0.9.1.2.1</version>
+      <extends>common-services/STORM/0.9.1.2.1</extends>
+      <version>0.9.1.2.1.1.0</version>
       <components>
-
         <component>
-          <name>NIMBUS</name>
-          <displayName>Nimbus</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/nimbus.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
+          <name>STORM_REST_API</name>
+          <deleted>true</deleted>
         </component>
-
-        <component>
-          <name>SUPERVISOR</name>
-          <displayName>Supervisor</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/supervisor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
         <component>
-          <name>STORM_UI_SERVER</name>
-          <displayName>Storm UI Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/ui_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
+          <name>DRPC_SERVER</name>
+          <deleted>true</deleted>
         </component>
-
       </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>storm-site</config-type>
-        <config-type>storm-env</config-type>
-      </configuration-dependencies>
     </service>
   </services>
 </metainfo>
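
The same deletion mechanism works at the component level: the STORM_REST_API and DRPC_SERVER entries above remove those inherited components from the Windows stack, while NIMBUS, SUPERVISOR, and STORM_UI_SERVER are inherited unchanged from common-services/STORM/0.9.1.2.1. A rough sketch of the shape:

  <components>
    <!-- components not mentioned here (NIMBUS, SUPERVISOR, STORM_UI_SERVER)
         are inherited as-is from the parent definition -->
    <component>
      <name>DRPC_SERVER</name>
      <!-- drop this inherited component from the HDPWIN 2.1 stack -->
      <deleted>true</deleted>
    </component>
  </components>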

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-env.xml
new file mode 100644
index 0000000..fd7e7b0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-env.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>tez_user</name>
+    <deleted>true</deleted>
+  </property>
+
+  <!-- tez-env.cmd -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for tez-env.cmd file</description>
+    <value>
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-site.xml
index 42eaa45..ac788bf 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/configuration/tez-site.xml
@@ -20,199 +20,8 @@
 <configuration supports_final="true">
 
   <property>
-    <name>tez.lib.uris</name>
-    <value>hdfs:///apps/tez/,hdfs:///apps/tez/lib/</value>
-    <!-- TODO:Remove this  value>${fs.default.name}/apps/tez/,${fs.default.name}/apps/tez/lib</value-->
-    <description>The location of the Tez libraries which will be localized for DAGs</description>
-  </property>
-
-  <property>
-    <name>tez.am.log.level</name>
-    <value>INFO</value>
-    <description>Root Logging level passed to the Tez app master</description>
-  </property>
-
-  <property>
     <name>tez.staging-dir</name>
-    <value>/tmp/${user.name}/staging</value>
+    <value>c:\hadoop\temp\${user.name}\staging</value>
     <description>The staging dir used while submitting DAGs</description>
   </property>
-
-  <property>
-    <name>tez.am.resource.memory.mb</name>
-    <value>1536</value>
-    <!-- TODO: Value needs to be set based on YARN configuration - similar to the way the MR AM size is set, 1.5 times the MR AM size -->
-    <description>The amount of memory to be used by the AppMaster</description>
-  </property>
-
-  <property>
-    <name>tez.am.java.opts</name>
-    <value>-server -Xmx1024m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC</value>
-    <!-- TODO: Xmx value needs to be set based on tez.am.resource.memory.mb - typically at least 512MB less. Also this could have some additional hardware specific settings if Ambari supports it -->
-    <description>Java options for the Tez AppMaster process</description>
-  </property>
-  <!-- tez picks the java opts from yarn.app.mapreduce.am.command-opts for MR tasks. Likewise for the AM memory MB -->
-
-  <property>
-    <name>tez.am.shuffle-vertex-manager.min-src-fraction</name>
-    <value>0.2</value>
-    <description>In case of a ScatterGather connection, the fraction of source tasks which should
-      complete before tasks for the current vertex are schedule
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.shuffle-vertex-manager.max-src-fraction</name>
-    <value>0.4</value>
-    <description>In case of a ScatterGather connection, once this fraction of source tasks have
-      completed, all tasks on the current vertex can be scheduled. Number of tasks ready for
-      scheduling on the current vertex scales linearly between min-fraction and max-fraction
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.am-rm.heartbeat.interval-ms.max</name>
-    <value>250</value>
-    <description>The maximum heartbeat interval between the AM and RM in milliseconds</description>
-  </property>
-
-  <property>
-    <name>tez.am.grouping.split-waves</name>
-    <value>1.4</value>
-    <description>The multiplier for available queue capacity when determining number of tasks for
-      a Vertex. 1.4 with 100% queue available implies generating a number of tasks roughly equal
-      to 140% of the available containers on the queue
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.grouping.min-size</name>
-    <value>16777216</value>
-    <description>Lower bound on the size (in bytes) of a grouped split, to avoid generating
-      too many splits
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.grouping.max-size</name>
-    <value>1073741824</value>
-    <description>Upper bound on the size (in bytes) of a grouped split, to avoid generating
-      excessively large split
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.container.reuse.enabled</name>
-    <value>true</value>
-    <description>Configuration to specify whether container should be reused</description>
-  </property>
-
-  <property>
-    <name>tez.am.container.reuse.rack-fallback.enabled</name>
-    <value>true</value>
-    <description>Whether to reuse containers for rack local tasks. Active only if reuse is enabled
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.container.reuse.non-local-fallback.enabled</name>
-    <value>true</value>
-    <description>Whether to reuse containers for non-local tasks. Active only if reuse is enabled
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.container.session.delay-allocation-millis</name>
-    <value>10000</value>
-    <!-- TODO This value may change -->
-    <description>The amount of time to hold on to a container if no task can be assigned to
-      it immediately. Only active when reuse is enabled. Set to -1 to never release a container
-      in a session
-    </description>
-  </property>
-
-  <property>
-    <name>tez.am.container.reuse.locality.delay-allocation-millis</name>
-    <value>250</value>
-    <description>The amount of time to wait before assigning a container to the next level of
-      locality. NODE -&gt; RACK -&gt; NON_LOCAL
-    </description>
-  </property>
-
-  <property>
-    <name>tez.task.get-task.sleep.interval-ms.max</name>
-    <value>200</value>
-    <description>The maximum amount of time, in seconds, to wait before a task asks an AM for
-      another task
-    </description>
-  </property>
-
-  <!-- Client Submission timeout value when submitting DAGs to a session -->
-  <property>
-    <name>tez.session.client.timeout.secs</name>
-    <value>180</value>
-    <description>Time (in seconds) to wait for AM to come up when trying to submit a DAG from
-      the client
-    </description>
-  </property>
-
-  <property>
-    <name>tez.session.am.dag.submit.timeout.secs</name>
-    <value>300</value>
-    <description>Time (in seconds) for which the Tez AM should wait for a DAG to be submitted
-      before shutting down
-    </description>
-  </property>
-
-
-  <!-- Configuration for runtime components -->
-
-  <!-- These properties can be set on a per edge basis by configuring the payload for each
-       edge independently. -->
-
-  <property>
-    <name>tez.runtime.intermediate-output.should-compress</name>
-    <value>false</value>
-    <description>Whether intermediate output should be compressed or not</description>
-  </property>
-
-  <property>
-    <name>tez.runtime.intermediate-output.compress.codec</name>
-    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>The coded to be used if compressing intermediate output. Only
-      applicable if tez.runtime.intermediate-output.should-compress is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>tez.runtime.intermediate-input.is-compressed</name>
-    <value>false</value>
-    <description>Whether intermediate input is compressed</description>
-  </property>
-
-  <property>
-    <name>tez.runtime.intermediate-input.compress.codec</name>
-    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>The coded to be used when reading intermediate compressed input.
-      Only applicable if tez.runtime.intermediate-input.is-compressed is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>tez.runtime.job.counters.max</name>
-    <value>10000</value>
-  </property>
-  <property>
-    <name>tez.runtime.job.counters.groups.max</name>
-    <value>10000</value>
-  </property>
-
-  <!-- Configuration for ATS integration -->
-
-  <property>
-    <name>tez.yarn.ats.enabled</name>
-    <value>true</value>
-    <description>Whether to send history events to YARN Application Timeline Server</description>
-  </property>
-
 </configuration>
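
With the service now extending common-services/TEZ, this file shrinks to the single Windows-specific override: tez.staging-dir pointing at a local drive. The ${user.name} token is expanded by Hadoop-style configuration variable substitution at read time; the toy re-implementation below is for illustration only (Hadoop's Configuration class does the real expansion).

import getpass
import re

def expand(value, variables):
    """Expand ${name} tokens the way Hadoop-style configs do (simplified)."""
    return re.sub(r"\$\{([^}]+)\}",
                  lambda m: variables.get(m.group(1), m.group(0)),
                  value)

staging = r"c:\hadoop\temp\${user.name}\staging"
print(expand(staging, {"user.name": getpass.getuser()}))
# e.g. c:\hadoop\temp\hadoop\staging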

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/metainfo.xml
index 1ca4d56..cbd6a30 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/TEZ/metainfo.xml
@@ -20,27 +20,13 @@
   <services>
     <service>
       <name>TEZ</name>
-      <displayName>Tez</displayName>
-      <comment>Tez is the next generation Hadoop Query Processing framework written on top of YARN.</comment>
-      <version>0.4.0.2.1</version>
+      <extends>common-services/TEZ/0.4.0.2.1</extends>
+      <version>0.4.0.2.1.1.0</version>
       <components>
         <component>
           <name>TEZ_CLIENT</name>
-          <displayName>Tez Client</displayName>
-          <cardinality>1+</cardinality>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/tez_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
           <configFiles>
             <configFile>
-              <type>xml</type>
-              <fileName>tez-site.xml</fileName>
-              <dictionaryName>tez-site</dictionaryName>
-            </configFile>
-            <configFile>
               <type>env</type>
               <fileName>tez-env.cmd</fileName>
               <dictionaryName>tez-env</dictionaryName>
@@ -48,16 +34,6 @@
           </configFiles>
         </component>
       </components>
-
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>tez-site</config-type>
-        <config-type>tez-env</config-type>
-      </configuration-dependencies>
-
     </service>
   </services>
 </metainfo>
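
This metainfo.xml shows the heart of the refactoring pattern: the stack keeps only its deltas (a stack-specific version and the tez-env.cmd config file) and inherits everything else (display name, cardinality, command scripts, required services, configuration dependencies) from common-services/TEZ/0.4.0.2.1 via <extends>. Below is a hypothetical sketch of that field-level fallback; the real resolver is Java code in ambari-server and also merges nested components, so treat the shapes as assumptions.

def resolve_service(parent, child):
    """Child fields win when present; anything the child omits falls back to the parent."""
    resolved = dict(parent)
    resolved.update({key: val for key, val in child.items() if val is not None})
    return resolved

common_tez = {"displayName": "Tez", "version": "0.4.0.2.1",
              "requiredServices": ["YARN"], "commandScript": "scripts/tez_client.py"}
hdpwin_tez = {"displayName": None, "version": "0.4.0.2.1.1.0",
              "requiredServices": None, "commandScript": None}
print(resolve_service(common_tez, hdpwin_tez))
# displayName, requiredServices and commandScript come from the parent; version from the child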

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml
index a1a4804..9c41776 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/YARN/metainfo.xml
@@ -23,7 +23,7 @@
       <name>YARN</name>
       <displayName>YARN</displayName>
       <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.1.0.2.0</version>
+      <version>2.4.0.2.1.1.0</version>
       <components>
 
         <component>
@@ -133,7 +133,7 @@
       <name>MAPREDUCE2</name>
       <displayName>MapReduce2</displayName>
       <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.1.0.2.0.6.0</version>
+      <version>2.4.0.2.1.1.0</version>
       <components>
         <component>
           <name>HISTORYSERVER</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zoo.cfg.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zoo.cfg.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zoo.cfg.xml
index a28d3b2..244f75e 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zoo.cfg.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zoo.cfg.xml
@@ -22,28 +22,8 @@
 
 <configuration>
   <property>
-    <name>tickTime</name>
-    <value>2000</value>
-    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-  </property>
-  <property>
-    <name>initLimit</name>
-    <value>10</value>
-    <description>Ticks to allow for sync at Init.</description>
-  </property>
-  <property>
-    <name>syncLimit</name>
-    <value>5</value>
-    <description>Ticks to allow for sync at Runtime.</description>
-  </property>
-  <property>
-    <name>clientPort</name>
-    <value>2181</value>
-    <description>Port for running ZK Server.</description>
-  </property>
-  <property>
     <name>dataDir</name>
-    <value>C:\\\\hadoop\\\\zookeeper</value>
+    <value>c:\hadoop\zookeeper</value>
     <description>Data directory for ZooKeeper.</description>
   </property>
 </configuration>
\ No newline at end of file
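
The old dataDir value had gone through one escaping layer too many: every layer that treats a backslash as an escape character halves the count, so whether C:\\\\hadoop\\\\zookeeper ever collapses to a usable single-backslash path depends on exactly how many times the value is re-parsed. Storing the plain literal sidesteps the guesswork. A quick demonstration of the halving, assuming each round applies standard backslash unescaping:

raw_old = r"C:\\\\hadoop\\\\zookeeper"           # the literal text the XML used to hold
after_one_parse = raw_old.replace("\\\\", "\\")  # one unescaping round halves the backslashes
print(after_one_parse)                           # C:\\hadoop\\zookeeper -- still doubled
print(after_one_parse.replace("\\\\", "\\"))     # C:\hadoop\zookeeper -- needs a second round
print(r"c:\hadoop\zookeeper")                    # the corrected value needs no unescaping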

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-env.xml
index a8964fa..64dd971 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-env.xml
@@ -23,25 +23,51 @@
 <configuration>
   <property>
     <name>zk_user</name>
-    <value>zookeeper</value>
-    <description>ZooKeeper User.</description>
+    <deleted>true</deleted>
+  </property>
+  <property>
+    <name>zk_log_dir</name>
+    <value>c:\hadoop\log\zookeeper</value>
+    <description>ZooKeeper log directory.</description>
+  </property>
+  <property>
+    <name>zk_pid_dir</name>
+    <value>c:\hadoop\run\zookeeper</value>
+    <description>ZooKeeper pid directory.</description>
   </property>
   <!-- zookeeper-env.sh -->
   <property>
     <name>content</name>
     <description>zookeeper-env.sh content</description>
     <value>
-export JAVA_HOME={{java64_home}}
-export ZOO_LOG_DIR={{zk_log_dir}}
-export ZOOPIDFILE={{zk_pid_file}}
-export SERVER_JVMFLAGS={{zk_server_heapsize}}
-export JAVA=$JAVA_HOME/bin/java
-export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
+@rem Set environment variables here.
+
+@rem JVM parameters for both client and server
+@rem JDK6 on Windows has a known bug for IPv6, use preferIPv4Stack unless JDK7.
+set JVMFLAGS=-Djava.net.preferIPv4Stack=true
+
+@rem Client specific JVM parameters
+@rem set CLIENT_JVMFLAGS=
 
-{% if security_enabled %}
-export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
-export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
-{% endif %}
+@rem Server specific JVM parameters
+@rem set SERVER_JVMFLAGS=
     </value>
   </property>
 </configuration>
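
As with the other *-env configs in this patch, the content property is a template that the agent renders into the actual zookeeper-env.cmd; the old body was a bash script with Jinja placeholders such as {{zk_log_dir}}, while the new one is a self-contained Windows batch file. The sketch below shows the general rendering step, assuming jinja2; the placeholder line is illustrative (the new template above hard-codes its settings), and Ambari's real pipeline is more involved.

from jinja2 import Template

content = (
    "set ZOO_LOG_DIR={{zk_log_dir}}\n"
    "set JVMFLAGS=-Djava.net.preferIPv4Stack=true\n"
)
rendered = Template(content).render(zk_log_dir=r"c:\hadoop\log\zookeeper")
with open("zookeeper-env.cmd", "w") as handle:
    handle.write(rendered)   # written into the client's conf directory in practice
print(rendered)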

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
deleted file mode 100644
index 4dce6d1..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
+++ /dev/null
@@ -1,100 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <value>
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-#
-# ZooKeeper Logging Configuration
-#
-
-# DEFAULT: console appender only
-log4j.rootLogger=INFO, CONSOLE
-
-# Example with rolling log file
-#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
-
-# Example with rolling log file and tracing
-#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
-
-#
-# Log INFO level and above messages to the console
-#
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.Threshold=INFO
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-#
-# Add ROLLINGFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.ROLLINGFILE.Threshold=DEBUG
-log4j.appender.ROLLINGFILE.File=zookeeper.log
-
-# Max log file size of 10MB
-log4j.appender.ROLLINGFILE.MaxFileSize=10MB
-# uncomment the next line to limit number of backup files
-#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
-
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-
-#
-# Add TRACEFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
-log4j.appender.TRACEFILE.Threshold=TRACE
-log4j.appender.TRACEFILE.File=zookeeper_trace.log
-
-log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
-### Notice we are including log4j's NDC here (%x)
-log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/metainfo.xml
index daae2a7..afcc9cb 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/ZOOKEEPER/metainfo.xml
@@ -20,59 +20,21 @@
   <services>
     <service>
       <name>ZOOKEEPER</name>
-      <displayName>ZooKeeper</displayName>
-      <comment>Centralized service which provides highly reliable distributed coordination</comment>
-      <version>3.4.5.2.0</version>
+      <extends>common-services/ZOOKEEPER/3.4.5.2.0</extends>
+      <version>3.4.5.2.1.1.0</version>
       <components>
 
         <component>
-          <name>ZOOKEEPER_SERVER</name>
-          <displayName>ZooKeeper Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/zookeeper_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
           <name>ZOOKEEPER_CLIENT</name>
-          <displayName>ZooKeeper Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/zookeeper_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
           <configFiles>
             <configFile>
               <type>env</type>
               <fileName>zookeeper-env.cmd</fileName>
               <dictionaryName>zookeeper-env</dictionaryName>
             </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>zookeeper-log4j</dictionaryName>
-            </configFile>
           </configFiles>
         </component>
       </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>zookeeper-log4j</config-type>
-        <config-type>zookeeper-env</config-type>
-        <config-type>zoo.cfg</config-type>
-      </configuration-dependencies>
-      <restartRequiredAfterChange>true</restartRequiredAfterChange>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-web/app/utils/validator.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/validator.js b/ambari-web/app/utils/validator.js
index 3e84982..b7bfbf7 100644
--- a/ambari-web/app/utils/validator.js
+++ b/ambari-web/app/utils/validator.js
@@ -47,8 +47,8 @@ module.exports = {
    */
   isValidDir: function(value){
     var floatRegex = /^\/[0-9a-z]*/;
-    var winRegex = /^[a-z]:\\[0-9a-z]*/;
-    var winUrlRegex = /^file:\/\/\/[a-z]:\/[0-9a-z]*/;
+    var winRegex = /^[a-z]:\\[0-9a-zA-Z]*/;
+    var winUrlRegex = /^file:\/\/\/[a-zA-Z]:\/[0-9a-zA-Z]*/;
     var dirs = value.replace(/,/g,' ').trim().split(new RegExp("\\s+", "g"));
     for(var i = 0; i < dirs.length; i++){
       if(!floatRegex.test(dirs[i]) && !winRegex.test(dirs[i]) && !winUrlRegex.test(dirs[i])){
@@ -65,9 +65,11 @@ module.exports = {
    */
   isValidDataNodeDir: function(value) {
     var dirRegex = /^(\[[0-9a-zA-Z]+\])?(\/[0-9a-z]*)/;
+    var winRegex = /^(\[[0-9a-zA-Z]+\])?[a-zA-Z]:\\[0-9a-zA-Z]*/;
+    var winUrlRegex = /^(\[[0-9a-zA-Z]+\])?file:\/\/\/[a-zA-Z]:\/[0-9a-zA-Z]*/;
     var dirs = value.replace(/,/g,' ').trim().split(new RegExp("\\s+", "g"));
     for(var i = 0; i < dirs.length; i++){
-      if(!dirRegex.test(dirs[i])){
+      if(!dirRegex.test(dirs[i]) && !winRegex.test(dirs[i]) && !winUrlRegex.test(dirs[i])){
         return false;
       }
     }
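
The validator.js change teaches the directory validators about Windows paths: mixed-case characters after the drive letter, file:/// URLs, and, for DataNode dirs, an optional [DISK]/[SSD]-style storage-type prefix. One subtlety: after this patch, isValidDir's plain backslash form still accepts only lower-case drive letters ([a-z]:), while its file:/// form and both new isValidDataNodeDir forms accept either case. A Python translation of the updated patterns for quick experimentation (the shipped code is the JavaScript above):

import re

win_dir     = re.compile(r"^[a-z]:\\[0-9a-zA-Z]*")                        # isValidDir, backslash form
win_url_dir = re.compile(r"^file:///[a-zA-Z]:/[0-9a-zA-Z]*")              # isValidDir, URL form
dn_win_dir  = re.compile(r"^(\[[0-9a-zA-Z]+\])?[a-zA-Z]:\\[0-9a-zA-Z]*")  # isValidDataNodeDir

print(bool(win_dir.match(r"c:\hadoop\data")))             # True
print(bool(win_dir.match(r"C:\hadoop\data")))             # False: upper-case drive letter
print(bool(win_url_dir.match("file:///C:/hadoop/data")))  # True
print(bool(dn_win_dir.match(r"[SSD]c:\hadoop\data")))     # True: storage-type prefix allowed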


[3/3] ambari git commit: AMBARI-8876: Common Services: Refactor HDPWIN 2.1 stack to use common services (Jayush Luniya)

Posted by jl...@apache.org.
AMBARI-8876: Common Services: Refactor HDPWIN 2.1 stack to use common services (Jayush Luniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/af6f6e87
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/af6f6e87
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/af6f6e87

Branch: refs/heads/trunk
Commit: af6f6e877209d5ab5d25fe2858259c234137eae5
Parents: 8567905
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Tue Dec 30 21:44:28 2014 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Tue Dec 30 21:44:28 2014 -0800

----------------------------------------------------------------------
 .../FALCON/configuration/falcon-env.xml         |  68 +-
 .../configuration/falcon-runtime.properties.xml |  47 --
 .../configuration/falcon-startup.properties.xml | 207 -------
 .../FALCON/configuration/oozie-site.xml         | 167 -----
 .../HDPWIN/2.1/services/FALCON/metainfo.xml     |  70 +--
 .../services/HBASE/configuration/hbase-env.xml  |  29 +-
 .../HBASE/configuration/hbase-policy.xml        |  53 --
 .../services/HBASE/configuration/hbase-site.xml | 292 +--------
 .../HDPWIN/2.1/services/HBASE/metainfo.xml      |  93 +--
 .../services/HDFS/configuration/core-site.xml   | 213 +------
 .../services/HDFS/configuration/hadoop-env.xml  | 119 ++++
 .../HDFS/configuration/hadoop-policy.xml        | 219 -------
 .../services/HDFS/configuration/hdfs-site.xml   | 274 ++-------
 .../HDPWIN/2.1/services/HDFS/metainfo.xml       | 128 +---
 .../services/HIVE/configuration/hcat-env.xml    |  31 +
 .../services/HIVE/configuration/hive-env.xml    |  42 +-
 .../services/HIVE/configuration/hive-site.xml   | 282 ++++-----
 .../services/HIVE/configuration/webhcat-env.xml |  31 +
 .../HIVE/configuration/webhcat-site.xml         |  51 +-
 .../HDPWIN/2.1/services/HIVE/metainfo.xml       | 165 +----
 .../services/OOZIE/configuration/oozie-env.xml  |  21 +-
 .../OOZIE/configuration/oozie-log4j.xml         |  96 ---
 .../services/OOZIE/configuration/oozie-site.xml | 540 +++-------------
 .../HDPWIN/2.1/services/OOZIE/metainfo.xml      |  91 +--
 .../services/PIG/configuration/pig-log4j.xml    |  61 --
 .../PIG/configuration/pig-properties.xml        | 262 --------
 .../stacks/HDPWIN/2.1/services/PIG/metainfo.xml |  39 +-
 .../services/SQOOP/configuration/sqoop-env.xml  |  10 +-
 .../HDPWIN/2.1/services/SQOOP/metainfo.xml      |  45 +-
 .../services/STORM/configuration/storm-env.xml  |  47 ++
 .../services/STORM/configuration/storm-site.xml | 616 +------------------
 .../HDPWIN/2.1/services/STORM/metainfo.xml      |  66 +-
 .../2.1/services/TEZ/configuration/tez-env.xml  |  36 ++
 .../2.1/services/TEZ/configuration/tez-site.xml | 193 +-----
 .../stacks/HDPWIN/2.1/services/TEZ/metainfo.xml |  28 +-
 .../HDPWIN/2.1/services/YARN/metainfo.xml       |   4 +-
 .../ZOOKEEPER/configuration/zoo.cfg.xml         |  22 +-
 .../ZOOKEEPER/configuration/zookeeper-env.xml   |  50 +-
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml | 100 ---
 .../HDPWIN/2.1/services/ZOOKEEPER/metainfo.xml  |  42 +-
 ambari-web/app/utils/validator.js               |   8 +-
 41 files changed, 641 insertions(+), 4317 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml
index 0a12051..6ececc6 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml
@@ -21,89 +21,35 @@
 <configuration>
   <property>
     <name>falcon_user</name>
-    <value>falcon</value>
-    <description>Falcon user.</description>
-  </property>
-  <property>
-    <name>falcon_port</name>
-    <value>15000</value>
-    <description>Port the Falcon Server listens on.</description>
+    <deleted>true</deleted>
   </property>
   <property>
     <name>falcon_log_dir</name>
-    <value>/var/log/falcon</value>
+    <value>c:\hadoop\logs\falcon</value>
     <description>Falcon log directory.</description>
   </property>
   <property>
     <name>falcon_pid_dir</name>
-    <value>/var/run/falcon</value>
+    <value>c:\hadoop\run\falcon</value>
     <description>Falcon pid-file directory.</description>
   </property>
   <property>
     <name>falcon_local_dir</name>
-    <value>/hadoop/falcon</value>
+    <value>c:\hadoop\falcon</value>
     <description>Directory where Falcon data, such as activemq data, is stored.</description>
   </property>
   <!--embeddedmq properties-->
   <property>
     <name>falcon.embeddedmq.data</name>
-    <value>/hadoop/falcon/embeddedmq/data</value>
+    <value>c:\hadoop\falcon\embeddedmq\data</value>
     <description>Directory in which embeddedmq data is stored.</description>
   </property>
-  <property>
-    <name>falcon.embeddedmq</name>
-    <value>true</value>
-    <description>Whether embeddedmq is enabled or not.</description>
-  </property>
-  <property>
-    <name>falcon.emeddedmq.port</name>
-    <value>61616</value>
-    <description>Port that embeddedmq will listen on.</description>
-  </property>
 
-  <!-- falcon-env.sh -->
+  <!-- falcon-env.cmd -->
   <property>
     <name>content</name>
-    <description>falcon-env.sh content</description>
+    <description>This is the Jinja template for the falcon-env.cmd file</description>
     <value>
-# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path
-export JAVA_HOME={{java_home}}
-
-# any additional java opts you want to set. This will apply to both client and server operations
-#export FALCON_OPTS=
-
-# any additional java opts that you want to set for client only
-#export FALCON_CLIENT_OPTS=
-
-# java heap size we want to set for the client. Default is 1024MB
-#export FALCON_CLIENT_HEAP=
-
-# any additional opts you want to set for prisim service.
-#export FALCON_PRISM_OPTS=
-
-# java heap size we want to set for the prisim service. Default is 1024MB
-#export FALCON_PRISM_HEAP=
-
-# any additional opts you want to set for falcon service.
-export FALCON_SERVER_OPTS="-Dfalcon.embeddedmq={{falcon_embeddedmq_enabled}} -Dfalcon.emeddedmq.port={{falcon_emeddedmq_port}}"
-
-# java heap size we want to set for the falcon server. Default is 1024MB
-#export FALCON_SERVER_HEAP=
-
-# What is is considered as falcon home dir. Default is the base locaion of the installed software
-#export FALCON_HOME_DIR=
-
-# Where log files are stored. Defatult is logs directory under the base install location
-export FALCON_LOG_DIR={{falcon_log_dir}}
-
-# Where pid files are stored. Defatult is logs directory under the base install location
-export FALCON_PID_DIR={{falcon_pid_dir}}
-
-# where the falcon active mq data is stored. Defatult is logs/data directory under the base install location
-export FALCON_DATA_DIR={{falcon_embeddedmq_data}}
-
-# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.
-#export FALCON_EXPANDED_WEBAPP_DIR=
     </value>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-runtime.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-runtime.properties.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-runtime.properties.xml
deleted file mode 100644
index 94c8755..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-runtime.properties.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false">
-  <property>
-    <name>*.domain</name>
-    <value>${falcon.app.type}</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.log.cleanup.frequency.minutes.retention</name>
-    <value>hours(6)</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.log.cleanup.frequency.hours.retention</name>
-    <value>minutes(1)</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.log.cleanup.frequency.days.retention</name>
-    <value>days(7)</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.log.cleanup.frequency.months.retention</name>
-    <value>months(3)</value>
-    <description></description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-startup.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-startup.properties.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-startup.properties.xml
deleted file mode 100644
index 7459429..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-startup.properties.xml
+++ /dev/null
@@ -1,207 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false">
-  <!--advanced properties-->
-  <property>
-    <name>*.workflow.engine.impl</name>
-    <value>org.apache.falcon.workflow.engine.OozieWorkflowEngine</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.oozie.process.workflow.builder</name>
-    <value>org.apache.falcon.workflow.OozieProcessWorkflowBuilder</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.oozie.feed.workflow.builder</name>
-    <value>org.apache.falcon.workflow.OozieFeedWorkflowBuilder</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.SchedulableEntityManager.impl</name>
-    <value>org.apache.falcon.resource.SchedulableEntityManager</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.ConfigSyncService.impl</name>
-    <value>org.apache.falcon.resource.ConfigSyncService</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.ProcessInstanceManager.impl</name>
-    <value>org.apache.falcon.resource.InstanceManager</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.catalog.service.impl</name>
-    <value>org.apache.falcon.catalog.HiveCatalogService</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.application.services</name>
-    <value>org.apache.falcon.security.AuthenticationInitializationService,\
-      org.apache.falcon.service.ProcessSubscriberService,\
-      org.apache.falcon.entity.store.ConfigurationStore,\
-      org.apache.falcon.rerun.service.RetryService,\
-      org.apache.falcon.rerun.service.LateRunService,\
-      org.apache.falcon.service.LogCleanupService
-    </value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.configstore.listeners</name>
-    <value>org.apache.falcon.entity.v0.EntityGraph,\
-      org.apache.falcon.entity.ColoClusterRelation,\
-      org.apache.falcon.group.FeedGroupMap,\
-      org.apache.falcon.service.SharedLibraryHostingService
-    </value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.broker.impl.class</name>
-    <value>org.apache.activemq.ActiveMQConnectionFactory</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.shared.libs</name>
-    <value>activemq-core,ant,geronimo-j2ee-management,hadoop-distcp,jms,json-simple,oozie-client,spring-jms</value>
-    <description></description>
-  </property>
-  <!--common properties-->
-  <property>
-    <name>*.domain</name>
-    <value>${falcon.app.type}</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.config.store.uri</name>
-    <value>file:///hadoop/falcon/store</value>
-    <description>Location to store user entity configurations</description>
-  </property>
-  <property>
-    <name>*.system.lib.location</name>
-    <value>${falcon.home}/server/webapp/${falcon.app.type}/WEB-INF/lib</value>
-    <description>Location of libraries that is shipped to Hadoop</description>
-  </property>
-  <property>
-    <name>*.retry.recorder.path</name>
-    <value>${falcon.log.dir}/retry</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.cleanup.service.frequency</name>
-    <value>days(1)</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.broker.url</name>
-    <value>tcp://localhost:61616</value>
-    <description>Default Active MQ url</description>
-  </property>
-  <property>
-    <name>*.broker.ttlInMins</name>
-    <value>4320</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.entity.topic</name>
-    <value>FALCON.ENTITY.TOPIC</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.max.retry.failure.count</name>
-    <value>1</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.internal.queue.size</name>
-    <value>1000</value>
-    <description></description>
-  </property>
-  <!--properties without default values-->
-  <property>
-    <name>*.falcon.http.authentication.cookie.domain</name>
-    <value>EXAMPLE.COM</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.blacklisted.users</name>
-    <value></value>
-    <description>Comma separated list of black listed users</description>
-  </property>
-  <!--authentication properties-->
-  <property>
-    <name>*.falcon.authentication.type</name>
-    <value>simple</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.type</name>
-    <value>simple</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.token.validity</name>
-    <value>36000</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.signature.secret</name>
-    <value>falcon</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.simple.anonymous.allowed</name>
-    <value>true</value>
-    <description>Indicates if anonymous requests are allowed when using 'simple' authentication</description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.kerberos.name.rules</name>
-    <value>DEFAULT</value>
-    <description>The kerberos names rules is to resolve kerberos principal names, refer to Hadoop's KerberosName for more details.</description>
-  </property>
-  <!--kerberos params, must be set during security enabling-->
-  <property>
-    <name>*.falcon.service.authentication.kerberos.principal</name>
-    <value>falcon/_HOST@EXAMPLE.COM</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.service.authentication.kerberos.keytab</name>
-    <value>/etc/security/keytabs/falcon.service.keytab</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.dfs.namenode.kerberos.principal</name>
-    <value>nn/_HOST@EXAMPLE.COM</value>
-    <description>name node principal to talk to config store</description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <description>Indicates the Kerberos principal to be used for HTTP endpoint</description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.kerberos.keytab</name>
-    <value>/etc/security/keytabs/spnego.service.keytab</value>
-    <description>Location of the keytab file with the credentials for the HTTP principal</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/oozie-site.xml
deleted file mode 100644
index 4b0bf70..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/oozie-site.xml
+++ /dev/null
@@ -1,167 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration supports_final="true">
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-job-submit-instances</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo
-    </value>
-    <description>
-      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-      This property is a convenience property to add extensions to the built in executors without having to
-      include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-create-inst</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,
-      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>
-      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-      This property is a convenience property to add extensions to the built in executors without having to
-      include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-create</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
-      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>
-      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-      This property is a convenience property to add extensions to the built in executors without having to
-      include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-job-submit-data</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
-      dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,
-      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-      dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>
-      EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-      This property is a convenience property to add extensions to the built in executors without having to
-      include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-start</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
-      latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,
-      future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,
-      dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,
-      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,
-      dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>
-      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-      This property is a convenience property to add extensions to the built in executors without having to
-      include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-sla-submit</name>
-    <value>
-      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>
-      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-sla-create</name>
-    <value>
-      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>
-      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-    </description>
-  </property>
-  <!--web ui should add following properties to oozie site accordingly to FALCON_USER-->
-  <!--<property>-->
-    <!--<name>oozie.service.ProxyUserService.proxyuser.#FALCON_USER#.hosts</name>-->
-    <!--<value>*</value>-->
-    <!--<description>Falcon proxyuser hosts</description>-->
-  <!--</property>-->
-
-  <!--<property>-->
-    <!--<name>oozie.service.ProxyUserService.proxyuser.#FALCON_USER#.groups</name>-->
-    <!--<value>*</value>-->
-    <!--<description>Falcon proxyuser groups</description>-->
-  <!--</property>-->
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml
index 7938777..14b4c82 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml
@@ -21,86 +21,20 @@
     <service>
       <name>FALCON</name>
       <displayName>Falcon</displayName>
-      <comment>Data management and processing platform</comment>
-      <version>0.5.0.2.1</version>
+      <extends>common-services/FALCON/0.5.0.2.1</extends>
+      <version>0.5.0.2.1.1.0</version>
       <components>
         <component>
           <name>FALCON_CLIENT</name>
-          <displayName>Falcon Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/falcon_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
           <configFiles>
             <configFile>
               <type>env</type>
               <fileName>falcon-env.cmd</fileName>
               <dictionaryName>falcon-env</dictionaryName>
             </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>runtime.properties</fileName>
-              <dictionaryName>falcon-runtime.properties</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>startup.properties</fileName>
-              <dictionaryName>falcon-startup.properties</dictionaryName>
-            </configFile>
           </configFiles>
         </component>
-        <component>
-          <name>FALCON_SERVER</name>
-          <displayName>Falcon Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>OOZIE/OOZIE_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>OOZIE/OOZIE_CLIENT</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/falcon_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
       </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>OOZIE</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>falcon-env</config-type>
-        <config-type>falcon-startup.properties</config-type>
-        <config-type>falcon-runtime.properties</config-type>
-      </configuration-dependencies>
-
-      <excluded-config-types>
-        <config-type>oozie-site</config-type>
-      </excluded-config-types>
-
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml
index fc2ed7f..642e746 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml
@@ -23,39 +23,18 @@
 <configuration>
   <property>
     <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
+    <value>c:\hadoop\logs\hbase</value>
     <description>Log Directories for HBase.</description>
   </property>
   <property>
     <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
+    <value>c:\hadoop\run\hbase</value>
     <description>Pid Directory for HBase.</description>
   </property>
+
   <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_xmn_max</name>
-    <value>512</value>
-    <description>HBase RegionServer maximum value for minimum heap size.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_xmn_ratio</name>
-    <value>0.2</value>
-    <description>HBase RegionServer minimum heap size is calculated as a percentage of max heap size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-   <property>
     <name>hbase_user</name>
-    <value>hbase</value>
-    <property-type>USER</property-type>
-    <description>HBase User Name.</description>
+    <deleted>true</deleted>
   </property>
 
   <!-- hbase-env.cmd -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index b0807b6..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="true">
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (ie.
-    clients talking to HRegionServers)
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (ie.
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster)
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>

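The deleted hbase-policy.xml entries all document the same ACL format: a
comma-separated user list and a comma-separated group list separated by a
blank, with "*" meaning all users. A small Python sketch of that documented
format; this is an illustration, not Hadoop's ServiceAuthorizationManager.

# Parse and evaluate an ACL string such as "alice,bob users,wheel".
def parse_acl(acl):
    if acl.strip() == "*":
        return None  # special value: everyone is allowed
    users_part, _, groups_part = acl.strip().partition(" ")
    users = set(filter(None, users_part.split(",")))
    groups = set(filter(None, groups_part.split(",")))
    return users, groups

def is_allowed(acl, user, user_groups):
    parsed = parse_acl(acl)
    if parsed is None:
        return True
    users, groups = parsed
    return user in users or bool(groups & set(user_groups))

print(is_allowed("alice,bob users,wheel", "carol", ["users"]))  # True
print(is_allowed("alice,bob users,wheel", "carol", ["staff"]))  # False
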
http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml
index cbaaacd..d551d4d 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml
@@ -21,298 +21,16 @@
 -->
 <configuration supports_final="true">
   <property>
-    <name>hbase.rootdir</name>
-    <value>hdfs://localhost:8020/apps/hbase/data</value>
-    <description>The directory shared by region servers and into
-      which HBase persists.  The URL should be 'fully-qualified'
-      to include the filesystem scheme.  For example, to specify the
-      HDFS directory '/hbase' where the HDFS instance's namenode is
-      running at namenode.example.org on port 9000, set this value to:
-      hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-      into /tmp.  Change this configuration, or all data will be lost
-      on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
+    <name>dfs.domain.socket.path</name>
+    <deleted>true</deleted>
   </property>
   <property>
-    <name>hbase.master.port</name>
-    <value>60000</value>
-    <description>The port the HBase Master should bind to.</description>
-  </property>
-  <property >
     <name>hbase.tmp.dir</name>
-    <value>${java.io.tmpdir}/hbase-${user.name}</value>
+    <value>c:\hadoop\temp\hbase</value>
     <description>Temporary directory on the local filesystem.
       Change this setting to point to a location more permanent
-      than '/tmp', the usual resolve for java.io.tmpdir, as the
-      '/tmp' directory is cleared on machine restart.</description>
-  </property>
-  <property>
-    <name>hbase.local.dir</name>
-    <value>${hbase.tmp.dir}/local</value>
-    <description>Directory on the local filesystem to be used as a local storage
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>60010</value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>60030</value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
+      than '/tmp' (the '/tmp' directory is often cleared on
+      machine restart).
     </description>
   </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>30</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-      Same property is used by the Master for count of master handlers.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>604800000</value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-      HStoreFiles in a region.  Default: Set to 7 days.  Major compactions tend to
-      happen exactly when you need them least so enable them such that they run at
-      off-peak for your deploy; or, since this setting is on a periodicity that is
-      unlikely to match your loading, run the compactions via an external
-      invocation out of a cron job or some such.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.38</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
-      times hbase.hregion.flush.size bytes.  Useful for preventing
-      runaway memstore during spikes in update traffic.  Without an
-      upper-bound, memstore fills such that when it flushes the
-      resultant flush files take a long time to compact or split, or
-      worse, we OOME
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-      Memstore will be flushed to disk if size of the memstore
-      exceeds this number of bytes.  Value is checked by a thread that runs
-      every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-      Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-      grown to exceed this value, the hosting HRegion is split in two.
-      Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-      on a scanner if it is not served from (local, client) memory. Higher
-      caching values will enable faster scanners but will eat up more memory
-      and some calls of next may take longer and longer times when the cache is empty.
-      Do not set this value such that the time between invocations is greater
-      than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>90000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-      instance. This is to set an upper boundary for a single entry saved in a
-      storage file. Since they cannot be split it helps avoiding that a region
-      cannot be split any further because the data is too large. It seems wise
-      to set this to a fraction of the maximum region size. Setting it to zero
-      or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-      If more than this number of HStoreFiles in any one HStore
-      (one HStoreFile is written per flush of memstore) then a compaction
-      is run to rewrite all HStoreFiles files as one.  Larger numbers
-      put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.flush.retries.number</name>
-    <value>120</value>
-    <description>
-      The number of times the region flush operation will be retried.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>10</value>
-    <description>
-      If more than this number of StoreFiles in any one Store
-      (one StoreFile is written per flush of MemStore) then updates are
-      blocked for this HRegion until a compaction is completed, or
-      until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.4</value>
-    <description>
-      Percentage of maximum heap (-Xmx setting) to allocate to block cache
-      used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-      Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-    <description>  Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
-      (no authentication), and 'kerberos'.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-      default on all tables. For any override coprocessor method, these classes
-      will be called in order. After implementing your own Coprocessor, just put
-      it in HBase's classpath and add the fully qualified class name here.
-      A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-      By default this is set to localhost for local and pseudo-distributed modes
-      of operation. For a fully-distributed setup, this should be set to a full
-      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-      this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>false</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-      This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-      with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-      IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-      and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-      not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file paths are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.defaults.for.version.skip</name>
-    <value>false</value>
-    <description>Disables version verification.</description>
-  </property>
-
 </configuration>

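Several of the memstore settings deleted above interact: upperLimit and
lowerLimit bound the combined memstore footprint as a fraction of heap, while
block.multiplier times flush.size bounds a single region. A back-of-the-envelope
Python calculation using the values shown in the diff; the 1024 MB RegionServer
heap is an assumed figure for illustration.

heap_bytes = 1024 * 2**20          # assumed -Xmx for the RegionServer
upper_limit = 0.4                  # hbase.regionserver.global.memstore.upperLimit
lower_limit = 0.38                 # hbase.regionserver.global.memstore.lowerLimit
flush_size = 134217728             # hbase.hregion.memstore.flush.size (128 MB)
block_multiplier = 2               # hbase.hregion.memstore.block.multiplier

print("all-region updates block at %d MB" % (heap_bytes * upper_limit / 2**20))
print("flushing continues down to %d MB" % (heap_bytes * lower_limit / 2**20))
print("a single region blocks at %d MB" % (flush_size * block_multiplier / 2**20))
# 409 MB, 389 MB and 256 MB respectively
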
http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml
index 29dd01f..741f744 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml
@@ -20,109 +20,20 @@
   <services>
     <service>
       <name>HBASE</name>
-      <displayName>HBase</displayName>
-      <comment>Non-relational distributed database and centralized service for configuration management &amp;
-        synchronization
-      </comment>
-      <version>0.96.0.2.0</version>
+      <extends>common-services/HBASE/0.96.0.2.0</extends>
+      <version>0.98.0.2.1.1.0</version>
       <components>
         <component>
-          <name>HBASE_MASTER</name>
-          <displayName>HBase Master</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HBASE/HBASE_MASTER</co-locate>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts\hbase_master.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts\hbase_master.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>HBASE_REGIONSERVER</name>
-          <displayName>RegionServer</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts\hbase_regionserver.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
           <name>HBASE_CLIENT</name>
-          <displayName>HBase Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts\hbase_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
           <configFiles>
             <configFile>
-              <type>xml</type>
-              <fileName>hbase-site.xml</fileName>
-              <dictionaryName>hbase-site</dictionaryName>
-            </configFile>
-            <configFile>
               <type>env</type>
               <fileName>hbase-env.cmd</fileName>
               <dictionaryName>hbase-env</dictionaryName>
             </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hbase-log4j</dictionaryName>
-            </configFile>
           </configFiles>
         </component>
       </components>
-
-      <commandScript>
-        <script>scripts\service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>HDFS</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>hbase-policy</config-type>
-        <config-type>hbase-site</config-type>
-        <config-type>hbase-env</config-type>
-        <config-type>hbase-log4j</config-type>
-      </configuration-dependencies>
-
     </service>
   </services>
 </metainfo>

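The rewritten metainfo.xml keeps only HDPWIN-specific deltas and relies on
<extends>common-services/HBASE/0.96.0.2.0</extends> for the rest: scalar
fields such as version are overridden, and nested structures such as
components are merged with the parent definition. A hedged Python sketch of
that kind of resolution; the deep merge below is an illustration, not
Ambari's StackManager.

# Resolve a child service definition against the parent it extends.
def resolve_service(parent, child):
    resolved = dict(parent)
    for key, value in child.items():
        if isinstance(value, dict) and isinstance(parent.get(key), dict):
            resolved[key] = resolve_service(parent[key], value)  # deep merge
        else:
            resolved[key] = value                                # override
    return resolved

parent = {"name": "HBASE", "version": "0.96.0.2.0",
          "components": {"HBASE_MASTER": {"category": "MASTER"},
                         "HBASE_CLIENT": {"category": "CLIENT"}}}
child = {"version": "0.98.0.2.1.1.0",
         "components": {"HBASE_CLIENT": {"configFiles": ["hbase-env.cmd"]}}}

print(resolve_service(parent, child)["components"]["HBASE_CLIENT"])
# {'category': 'CLIENT', 'configFiles': ['hbase-env.cmd']}
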
http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
index 2dbda14..2d406fc 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
@@ -1,202 +1,27 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!-- Put site-specific property overrides in this file. -->
-<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
-  <!-- i/o properties -->
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-  <!-- file system properties -->
-  <property>
-    <name>fs.defaultFS</name>
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes after which the checkpoint
-      gets deleted.  If zero, the trash feature is disabled.
-    </description>
-  </property>
-  <property>
-    <name>fs.trash.checkpoint.interval</name>
-    <value>0</value>
-    <description>Number of minutes between trash checkpoints.
-      Should be smaller than or equal to fs.trash.interval. If zero,
-      the value is set to the value of fs.trash.interval.
-    </description>
-  </property>
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-    <description>The umask used when creating files and directories.
-      Can be in octal or in symbolic. Examples are: "022" (octal for
-      u=rwx,g=r-x,o=r-x in symbolic), or "u=rwx,g=rwx,o=" (symbolic
-      for 007 in octal).</description>
-  </property>
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-    </description>
-  </property>
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-    </description>
-  </property>
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>hadoop.http.staticuser.user</name>
-    <value>gopher</value>
-    <description>
-      The user name to filter as, on static web filters
-      while rendering content. An example use is the HDFS
-      web UI (user to be used for browsing files).
-    </description>
-  </property>
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of RM and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to the public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>simple</value>
-    <description>
-      Set the authentication for the cluster. Valid values are: simple or
-      kerberos.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>false</value>
-    <description>
-     Enable authorization for different protocols.
-  </description>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hadoop.groups</name>
-    <value>HadoopUsers</value>
-    <description>
-     Proxy group for Hadoop.
-  </description>
-  </property>
-  <property>
-    <name>hadoop.ssl.enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>hadoop.ssl.require.client.cert</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>hadoop.ssl.hostname.verifier</name>
-    <value>DEFAULT</value>
-  </property>
-  <property>
-    <name>hadoop.ssl.keystores.factory.class</name>
-    <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
-  </property>
-  <property>
-    <name>hadoop.ssl.server.conf</name>
-    <value>ssl-server.xml</value>
-  </property>
-  <property>
-    <name>hadoop.ssl.client.conf</name>
-    <value>ssl-client.xml</value>
-  </property>
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value>authentication</value>
-  </property>
-  <property>
-    <name>hadoop.tmp.dir</name>
-    <value>c:\hdp\temp\hadoop</value>
-  </property>
 
-  <property>
-    <name>hadoop.proxyuser.hadoop.hosts</name>
-    <value>192.168.145.128</value>
-  </property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>
-        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
-        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
-        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
-        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
-        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
-        DEFAULT
-    </value>
-<description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
-  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
+  <!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
 
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
+       http://www.apache.org/licenses/LICENSE-2.0
 
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+  -->
 
-RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
-RULE:[2:$1@$0](.*@ACME.ORG)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+  <property>
+    <name>hadoop.tmp.dir</name>
+    <value>c:\hadoop\temp\hadoop</value>
   </property>
 </configuration>
\ No newline at end of file

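The deleted hadoop.security.auth_to_local description explains the three
parts of a RULE: render the principal with the base pattern, check the filter
regex, then apply the sed-style substitution. A worked Python example for
RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/ from the removed value; this is a
simplified re-implementation for illustration, not Hadoop's KerberosName.

import re

def apply_rule(principal, n_components, fmt, filter_re, sed_find, sed_repl):
    short, _, realm = principal.partition("@")
    components = short.split("/")
    if len(components) != n_components:
        return None                       # base does not match
    rendered = fmt.replace("$0", realm)   # $0 stands for the realm
    for i, comp in enumerate(components, start=1):
        rendered = rendered.replace("$%d" % i, comp)
    if not re.match(filter_re + r"\Z", rendered):
        return None                       # filter does not match
    return re.sub(sed_find, sed_repl, rendered, count=1)

# rm/host1@EXAMPLE.COM renders as "rm@EXAMPLE.COM", passes the [rn]m@.*
# filter, and s/.*/yarn/ maps it to the local user "yarn".
print(apply_rule("rm/host1@EXAMPLE.COM", 2, "$1@$0", r"[rn]m@.*", r".*", "yarn"))
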
http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
index b4b90d7..b5451d8 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
@@ -26,8 +26,127 @@
 
 <configuration>
   <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>c:\hadoop\logs\hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>c:\hadoop\run\hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
     <name>dfs.datanode.data.dir.mount.file</name>
     <value>file:///c:/hadoop/conf/dfs_data_dir_mount.hist</value>
     <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
   </property>
+
+  <property>
+    <name>proxyuser_group</name>
+    <deleted>true</deleted>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <deleted>true</deleted>
+  </property>
+
+  <!-- hadoop-env.cmd -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for the hadoop-env.cmd file</description>
+    <value>
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem Set Hadoop-specific environment variables here.
+
+@rem The only required environment variable is JAVA_HOME.  All others are
+@rem optional.  When running a distributed configuration it is best to
+@rem set JAVA_HOME in this file, so that it is correctly defined on
+@rem remote nodes.
+
+@rem The java implementation to use.  Required.
+set JAVA_HOME=%JAVA_HOME%
+
+@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
+@rem set JSVC_HOME=%JSVC_HOME%
+
+@rem set HADOOP_CONF_DIR=
+
+@rem Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+if exist %HADOOP_HOME%\contrib\capacity-scheduler (
+if not defined HADOOP_CLASSPATH (
+set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+) else (
+set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+)
+)
+
+@rem If TEZ_CLASSPATH is defined in the env, that means TEZ is enabled;
+@rem append it to the HADOOP_CLASSPATH
+
+if defined TEZ_CLASSPATH (
+if not defined HADOOP_CLASSPATH (
+set HADOOP_CLASSPATH=%TEZ_CLASSPATH%
+) else (
+set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%TEZ_CLASSPATH%
+)
+)
+
+@rem The maximum amount of heap to use, in MB. Default is 1000.
+@rem set HADOOP_HEAPSIZE=
+@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+@rem Extra Java runtime options.  Empty by default.
+@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
+
+@rem Command specific options appended to HADOOP_OPTS when specified
+if not defined HADOOP_SECURITY_LOGGER (
+set HADOOP_SECURITY_LOGGER=INFO,RFAS
+)
+if not defined HDFS_AUDIT_LOGGER (
+set HDFS_AUDIT_LOGGER=INFO,NullAppender
+)
+
+set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
+set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
+set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
+
+@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
+@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
+
+@rem On secure datanodes, user to run the datanode as after dropping privileges
+set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
+
+@rem Where log files are stored.  %HADOOP_HOME%/logs by default.
+@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
+
+@rem Where log files are stored in the secure data environment.
+set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
+
+@rem The directory where pid files are stored. /tmp by default.
+@rem NOTE: this should be set to a directory that can only be written to by
+@rem       the user that will run the hadoop daemons.  Otherwise there is the
+@rem       potential for a symlink attack.
+set HADOOP_PID_DIR=%HADOOP_PID_DIR%
+set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
+
+@rem A string representing this instance of hadoop. %USERNAME% by default.
+set HADOOP_IDENT_STRING=%USERNAME%
+    </value>
+  </property>
 </configuration>
\ No newline at end of file

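The content property added above is described as the jinja template for the
hadoop-env.cmd file. A minimal Python sketch of rendering such a template
with jinja2 (assumed to be installed); the {{hdfs_log_dir_prefix}} expression
is added here purely for illustration, since the template in this diff
contains no jinja expressions yet, and Ambari renders these through its
resource_management library rather than code like this.

from jinja2 import Template

# Hypothetical fragment of a hadoop-env.cmd template with one jinja variable.
content = r"""@echo off
set HADOOP_IDENT_STRING=%USERNAME%
set HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}
"""

rendered = Template(content).render(hdfs_log_dir_prefix=r"c:\hadoop\logs\hadoop")
with open("hadoop-env.cmd", "w") as f:
    f.write(rendered)
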
http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 1549b41..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,219 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-
- Copyright 2011 The Apache Software Foundation
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true">
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.ha.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HAService protocol used by HAAdmin to manage the
-      active and stand-by states of namenode.</description>
-  </property>
-
-  <property>
-    <name>security.zkfc.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for access to the ZK Failover Controller
-    </description>
-  </property>
-
-  <property>
-    <name>security.qjournal.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for QJournalProtocol, used by the NN to communicate with
-    JNs when using the QuorumJournalManager for edit logs.</description>
-  </property>
-
-  <property>
-    <name>security.mrhs.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HSClientProtocol, used by job clients to
-    communicate with the MR History Server to query job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <!-- YARN Protocols -->
-
-  <property>
-    <name>security.resourcetracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceTrackerProtocol, used by the
-    ResourceManager and NodeManager to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.resourcemanager-administration.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceManagerAdministrationProtocol, for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationclient.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationClientProtocol, used by the ResourceManager
-    and applications submission clients to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationmaster.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationMasterProtocol, used by the ResourceManager
-    and ApplicationMasters to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.containermanagement.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager
-    and ApplicationMasters to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.resourcelocalizer.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceLocalizer protocol, used by the NodeManager
-    and ResourceLocalizer to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for MRClientProtocol, used by job clients to
-    communicate with the MR ApplicationMaster to query job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-</configuration>


http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
index f267e51..63e4c95 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
@@ -1,8 +1,26 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
 <!-- Put site-specific property overrides in this file. -->
 <configuration supports_final="true">
-  <!-- file system properties -->
+
   <property>
     <name>dfs.namenode.name.dir</name>
     <value>file:///c:/hdpdata/hdfs/nn</value>
@@ -13,24 +31,6 @@
     <final>true</final>
   </property>
   <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a DataNode will tolerate.</description>
-    <final>true</final>
-  </property>
-  <property>
     <name>dfs.datanode.data.dir</name>
     <value>file:///c:/hdpdata/hdfs/dn</value>
     <description>Determines where on the local filesystem a DFS data node
@@ -42,231 +42,41 @@
     <final>true</final>
   </property>
   <property>
-    <name>dfs.checksum.type</name>
-    <value>CRC32</value>
-    <description>The checksum method to be used by default. To maintain
-    compatibility, it is being set to CRC32. Once all migration steps
-    are complete, we can change it to CRC32C and take advantage of the
-    additional performance benefit.</description>
-  </property>
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-  </description>
-  </property>
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>1.0f</value>
-    <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
-  </property>
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for balancing purposes, in terms of
-        the number of bytes per second.
-  </description>
-  </property>
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-  </property>
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-  </property>
-  <property>
-    <name>dfs.datanode.https.address</name>
-    <value>0.0.0.0:50076</value>
-  </property>
-  <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-    <description>The default block size for new files, in bytes.
-      You can use the following suffix (case insensitive): k(kilo),
-      m(mega), g(giga), t(tera), p(peta), e(exa) to specify the
-      size (such as 128k, 512m, 1g, etc.), or provide the complete size
-      in bytes (such as 134217728 for 128 MB).</description>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address</name>
-    <value>localhost:50070</value>
-    <description>The address and the base port where the dfs namenode
-      web ui will listen on. If the port is 0 then the server will
-      start on a free port.</description>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.https.port</name>
-    <value>50070</value>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
-    </description>
-  </property>
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-  <property>
-    <name>dfs.datanode.du.pct</name>
-    <value>0.85f</value>
-    <description>When calculating remaining space, only use this percentage of the real available space
-    </description>
+    <name>dfs.hosts.exclude</name>
+    <value>c:\hdp\hadoop\etc\hadoop\dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+      not permitted to connect to the namenode.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.</description>
   </property>
   <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
+    <name>dfs.hosts</name>
+    <value>c:\hdp\hadoop\etc\hadoop\dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+      permitted to connect to the namenode. The full pathname of the file
+      must be specified.  If the value is empty, all hosts are
+      permitted.</description>
   </property>
   <property>
     <name>dfs.namenode.checkpoint.dir</name>
-    <value>file:///c:/hdpdata/hdfs/snn</value>
+    <value>file:///c:/hadoop/hdfs/snn</value>
     <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
+      name node should store the temporary images to merge.
+      If this is a comma-delimited list of directories then the image is
+      replicated in all of the directories for redundancy.
     </description>
   </property>
+
   <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>file:///c:/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directories then the edits are
-        replicated in all of the directories for redundancy.
-        The default value is the same as dfs.namenode.checkpoint.dir.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value>86400</value>
-    <description>The number of seconds between two periodic checkpoints.
-    </description>
-  </property>
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>1024</value>
-    <description>Specifies the maximum number of threads to use for
-      transferring data in and out of the DN.</description>
-  </property>
-  <!-- Permissions configuration -->
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-    <description>
-        If "true", enable permission checking in HDFS.
-        If "false", permission checking is turned off,
-        but all other behavior is unchanged.
-        Switching from one parameter value to the other does not change the mode,
-        owner or group of files or directories.
-    </description>
-  </property>
-  <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-  <property>
-    <name>ipc.server.max.response.size</name>
-    <value>5242880</value>
-  </property>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>false</value>
-    <description>
-        If "true", access tokens are used as capabilities for accessing datanodes.
-        If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.secondary.http-address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50091</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address</name>
-    <value>localhost:50701</value>
-    <description>The https address where namenode binds</description>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>755</value>
-    <description>The permissions that should be there on dfs.data.dir
-        directories. The datanode will not come up if the permissions are
-        different on existing dfs.data.dir directories. If the directories
-        don't exist, they will be created with this permission.</description>
-  </property>
-  <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-    </description>
-  </property>
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value>hdfs</value>
-    <description>ACL for who all can view the default servlets in the HDFS</description>
-  </property>
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description />
-  </property>
-  <property>
-    <name>dfs.encrypt.data.transfer</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>dfs.encrypt.data.transfer.algorithm</name>
-    <value>3des</value>
-  </property>
-  <property>
-    <name>dfs.https.enable</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>dfs.replication</name>
-    <value>1</value>
+    <name>dfs.client.read.shortcircuit</name>
+    <deleted>true</deleted>
   </property>
-
   <property>
-    <name>dfs.hosts.exclude</name>
-    <value>c:\hdp\hadoop\etc\hadoop\dfs.exclude</value>
+    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
+    <deleted>true</deleted>
   </property>
-
   <property>
-    <name>dfs.hosts</name>
-    <value>c:\hdp\hadoop\etc\hadoop\dfs.include</value>
+    <name>dfs.domain.socket.path</name>
+    <deleted>true</deleted>
   </property>
 </configuration>
\ No newline at end of file
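
A note on the <deleted>true</deleted> entries above: with stack inheritance, a child
stack removes a property it would otherwise inherit from its common-services parent by
marking it deleted instead of redefining it. The short-circuit-read properties are
dropped here presumably because short-circuit reads rely on Unix domain sockets, which
are not available on Windows. A minimal sketch of the pattern, reusing a property name
from this diff:

    <configuration>
      <property>
        <name>dfs.domain.socket.path</name>
        <deleted>true</deleted>
      </property>
    </configuration>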

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
index 127e205..fe1bb17 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
@@ -20,143 +20,21 @@
   <services>
     <service>
       <name>HDFS</name>
-      <displayName>HDFS</displayName>
-      <comment>Apache Hadoop Distributed File System</comment>
-      <version>2.1.0.2.0</version>
+      <extends>common-services/HDFS/2.1.0.2.0</extends>
+      <version>2.4.0.2.1.1.0</version>
 
       <components>
         <component>
-          <name>NAMENODE</name>
-          <displayName>NameNode</displayName>
-          <category>MASTER</category>
-          <cardinality>1-2</cardinality>
-          <commandScript>
-            <script>scripts/namenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>REBALANCEHDFS</name>
-              <background>true</background>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>DATANODE</name>
-          <displayName>DataNode</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/datanode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>SECONDARY_NAMENODE</name>
-          <displayName>SNameNode</displayName>
-          <!-- TODO:  cardinality is conditional on HA usage -->
-          <cardinality>1</cardinality>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/snamenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
           <name>HDFS_CLIENT</name>
-          <displayName>HDFS Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/hdfs_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
           <configFiles>
             <configFile>
-              <type>xml</type>
-              <fileName>hdfs-site.xml</fileName>
-              <dictionaryName>hdfs-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
               <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hadoop-env.cmds</fileName>
+              <fileName>hadoop-env.cmd</fileName>
               <dictionaryName>hadoop-env</dictionaryName>
             </configFile>
           </configFiles>
         </component>
-
-        <component>
-          <name>JOURNALNODE</name>
-          <displayName>JournalNode</displayName>
-          <category>SLAVE</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/journalnode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ZKFC</name>
-          <displayName>ZKFailoverController</displayName>
-          <category>SLAVE</category>
-          <!-- TODO: cardinality is conditional on HA topology -->
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/zkfc_slave.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
       </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-env</config-type>
-        <config-type>hadoop-policy</config-type>
-        <config-type>hdfs-log4j</config-type>
-      </configuration-dependencies>
     </service>
   </services>
 </metainfo>
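
The <extends>common-services/HDFS/2.1.0.2.0</extends> element is what allows the large
deletion above: components, command scripts, the service check and configuration
dependencies are all inherited from the common-services definition, so the stack
metainfo only declares what differs. A minimal sketch of an inheriting service
definition, assuming the usual metainfo schema version:

    <metainfo>
      <schemaVersion>2.0</schemaVersion>
      <services>
        <service>
          <name>HDFS</name>
          <extends>common-services/HDFS/2.1.0.2.0</extends>
          <version>2.4.0.2.1.1.0</version>
          <!-- only stack-specific overrides go here -->
        </service>
      </services>
    </metainfo>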

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hcat-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hcat-env.xml
new file mode 100644
index 0000000..ee18f17
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hcat-env.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- hcat-env.cmd -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hcat-env.cmd file</description>
+    <value>
+    </value>
+  </property>
+</configuration>
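
The content property above holds the Jinja template that is rendered into hcat-env.cmd
on each host; its value is intentionally left empty here. Purely as a hypothetical
illustration of how such a template is typically filled in (the hcat_log_dir variable is
borrowed from the hive-env changes below, not defined in this file):

    <property>
      <name>content</name>
      <value>
    set HCAT_LOG_DIR={{hcat_log_dir}}
      </value>
    </property>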

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml
index 57144be..6bdb8f3 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-env.xml
@@ -27,13 +27,6 @@
     <description>Default HIVE DB type.</description>
   </property>
   <property>
-    <name>hive_hostname</name>
-    <value></value>
-    <description>
-      Specify the host on which the HIVE database is hosted.
-    </description>
-  </property>
-  <property>
     <name>hive_database</name>
     <value>Existing MSSQL Server database with sql auth</value>
     <description>
@@ -42,57 +35,43 @@
   </property>
   <property>
     <name>hive_ambari_database</name>
-    <value>MySQL</value>
+    <value>MSSQL</value>
     <description>Database type.</description>
   </property>
   <property>
-    <name>hive_database_name</name>
-    <value>hive</value>
-    <description>Database name.</description>
-  </property>
-  <property>
-    <name>hive_dbroot</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive DB Directory.</description>
-  </property>
-  <property>
     <name>hive_log_dir</name>
-    <value>/var/log/hive</value>
+    <value>c:\hadoop\logs\hive</value>
     <description>Directory for Hive Log files.</description>
   </property>
   <property>
     <name>hive_pid_dir</name>
-    <value>/var/run/hive</value>
+    <value>c:\hadoop\run\hive</value>
     <description>Hive PID Dir.</description>
   </property>
   <property>
     <name>hive_user</name>
-    <value>hive</value>
-    <description>Hive User.</description>
+    <deleted>true</deleted>
   </property>
 
   <!--HCAT-->
-
-  <!--<property>
+  <property>
     <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
+    <value>c:\hadoop\logs\webhcat</value>
     <description>WebHCat Log Dir.</description>
   </property>
   <property>
     <name>hcat_pid_dir</name>
-    <value>/var/run/webhcat</value>
+    <value>c:\hadoop\run\webhcat</value>
     <description>WebHCat Pid Dir.</description>
   </property>
   <property>
     <name>hcat_user</name>
-    <value>hcat</value>
-    <description>HCat User.</description>
+    <deleted>true</deleted>
   </property>
   <property>
     <name>webhcat_user</name>
-    <value>hcat</value>
-    <description>WebHCat User.</description>
-  </property>-->
+    <deleted>true</deleted>
+  </property>
 
   <!-- hive-env.cmd -->
   <property>
@@ -101,5 +80,4 @@
     <value>
     </value>
   </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml
index 3f90c76..e479f79 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/hive-site.xml
@@ -3,54 +3,7 @@
 
 <configuration supports_final="true">
 
-  <!-- Hive Configuration can either be stored in this file or in the hadoop configuration files  -->
-  <!-- that are implied by Hadoop setup variables.                                                -->
-  <!-- Aside from Hadoop setup variables - this file is provided as a convenience so that Hive    -->
-  <!-- users do not have to edit hadoop configuration files (that may be managed as a centralized -->
-  <!-- resource).                                                                                 -->
-
-  <!-- Hive Execution Parameters -->
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-  </property>
-
-  <property>
-    <name>hive.metastore.connect.retries</name>
-    <value>5</value>
-    <description>Number of retries while opening a connection to metastore</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.ds.retry.attempts</name>
-    <value>0</value>
-    <description>The number of times to retry a metastore call if there were a connection error</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.ds.retry.interval</name>
-    <value>1000</value>
-    <description>The number of miliseconds between metastore retry attempts</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.hmshandler.retry.attempts</name>
-    <value>5</value>
-    <description>The number of times to retry a HMSHandler call if there were a connection error</description>
-  </property>
-
-  <property>
-    <name>hive.hmshandler.retry.interval</name>
-    <value>1000</value>
-    <description>The number of miliseconds between HMSHandler retry attempts</description>
-  </property>
-
+  <!-- Windows specific properties -->
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
     <value></value>
@@ -64,228 +17,249 @@
   </property>
 
   <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used as the Hive Metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property require-input="true">
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <type>PASSWORD</type>
-    <description>password to use against metastore database</description>
+    <name>hive.querylog.location</name>
+    <value>c:\hadoop\logs\hive</value>
   </property>
 
   <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
+    <name>hive.log.dir</name>
+    <value>c:\hadoop\logs\hive</value>
   </property>
 
+  <!-- New/Updated properties for 2.1 -->
   <property>
-    <name>hive.hwi.listen.host</name>
-    <value>0.0.0.0</value>
-    <description>This is the host address the Hive Web Interface will listen on</description>
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value>/etc/security/keytabs/hive.service.keytab</value>
+    <description>The path to the Kerberos Keytab file containing the metastore
+      thrift server's service principal.</description>
   </property>
 
   <property>
-    <name>hive.hwi.listen.port</name>
-    <value>9999</value>
-    <description>This is the port the Hive Web Interface will listen on</description>
+    <name>hive.metastore.kerberos.principal</name>
+    <value>hive/_HOST@EXAMPLE.COM</value>
+    <description>The service principal for the metastore thrift server. The special
+      string _HOST will be replaced automatically with the correct host name.</description>
   </property>
 
   <property>
-    <name>hive.hwi.war.file</name>
-    <value>lib\hive-hwi-@hive.version@.war</value>
-    <description>This is the WAR file with the jsp content for Hive Web Interface</description>
+    <name>hive.enforce.sortmergebucketmapjoin</name>
+    <value>true</value>
+    <description>If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not</description>
   </property>
 
   <property>
-    <name>hive.server2.transport.mode</name>
-    <value>binary</value>
-    <description>Server transport mode. "binary" or "http".</description>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>4</value>
+    <description>Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to the parent RS.
+      That means that if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single MR job.
+      The optimization will be disabled if the number of reducers is less than the specified value.
+    </description>
   </property>
 
   <property>
-    <name>hive.server2.thrift.http.port</name>
-    <value>10001</value>
-    <description>Port number when in HTTP mode.</description>
+    <name>hive.vectorized.execution.enabled</name>
+    <value>true</value>
+    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
+    </description>
   </property>
 
   <property>
-    <name>hive.server2.thrift.http.path</name>
-    <value>/</value>
-    <description>Path component of URL endpoint when in HTTP mode.</description>
+    <name>hive.execution.engine</name>
+    <value>mr</value>
+    <description>Whether to use MR or Tez</description>
   </property>
 
   <property>
-    <name>hive.server2.thrift.http.min.worker.threads</name>
-    <value>5</value>
-    <description>Minimum number of worker threads when in HTTP mode.</description>
+    <name>hive.exec.post.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of post-execution hooks to be invoked for each statement.</description>
   </property>
 
   <property>
-    <name>hive.server2.thrift.http.max.worker.threads</name>
-    <value>100</value>
-    <description>Maximum number of worker threads when in HTTP mode.</description>
+    <name>hive.exec.pre.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of pre-execution hooks to be invoked for each statement.</description>
   </property>
 
   <property>
-    <name>hive.server2.thrift.port</name>
-    <value>10001</value>
-    <description>HiveServer2 thrift port</description>
+    <name>hive.exec.failure.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of on-failure hooks to be invoked for each statement.</description>
   </property>
 
   <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>false</value>
+    <name>hive.vectorized.groupby.maxentries</name>
+    <value>100000</value>
+    <description>Max number of entries in the vector group by aggregation hashtables.
+      Exceeding this will trigger a flush irrespective of memory pressure conditions.
+    </description>
   </property>
 
   <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>true</value>
-    <description>enable or disable the hive client authorization</description>
+    <name>hive.vectorized.groupby.checkinterval</name>
+    <value>1024</value>
+    <description>Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed.</description>
   </property>
 
   <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <name>hive.vectorized.groupby.flush.percent</name>
+    <value>0.1</value>
+    <description>Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded.</description>
   </property>
 
   <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
+    <name>hive.stats.autogather</name>
     <value>true</value>
+    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
   </property>
 
   <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
+    <name>hive.tez.container.size</name>
+    <value>682</value>
+    <description>By default, Tez uses the java options from map tasks. Use this property to override that value.</description>
   </property>
 
   <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
+    <name>hive.tez.input.format</name>
+    <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+    <description>The default input format for Tez. Tez groups splits in the Application Master.</description>
   </property>
 
   <property>
-    <name>hive.optimize.index.filter</name>
-    <value>true</value>
+    <name>hive.tez.java.opts</name>
+    <value>-server -Xmx545m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps</value>
+    <description>Java command line options for Tez. The -Xmx parameter value is generally 80% of hive.tez.container.size.</description>
   </property>
 
   <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
+    <name>hive.compute.query.using.stats</name>
+    <value>true</value>
+    <description>
+      When set to true Hive will answer a few queries like count(1) purely using stats
+      stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.
+      For more advanced stats collection, analyze table queries need to be run.
+    </description>
   </property>
 
   <property>
     <name>hive.orc.splits.include.file.footer</name>
     <value>false</value>
+    <description>
+      If turned on, splits generated by ORC will include metadata about the stripes in the file. This
+      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
+    </description>
   </property>
 
   <property>
-    <name>hive.exec.local.cache</name>
+    <name>hive.limit.optimize.enable</name>
     <value>true</value>
+    <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
   </property>
 
   <property>
-    <name>hive.vectorized.execution.enabled</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.vectorized.groupby.flush.percent</name>
-    <value>1.0</value>
+    <name>hive.limit.pushdown.memory.usage</name>
+    <value>0.04</value>
+    <description>The max memory to be used for hash in RS operator for top K selection.</description>
   </property>
 
   <property>
-    <name>hive.vectorized.groupby.checkinterval</name>
-    <value>1024</value>
+    <name>hive.server2.tez.default.queues</name>
+    <value>default</value>
+    <description>A comma-separated list of queues configured for the cluster.</description>
   </property>
 
   <property>
-    <name>hive.vectorized.groupby.maxentries</name>
-    <value>1024</value>
+    <name>hive.server2.tez.sessions.per.default.queue</name>
+    <value>1</value>
+    <description>The number of sessions for each queue named in the hive.server2.tez.default.queues.</description>
   </property>
 
   <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <name>hive.server2.tez.initialize.default.sessions</name>
     <value>false</value>
+    <description>Enables a user to use HiveServer2 without enabling Tez for HiveServer2. Users may potentially want to run queries with Tez without a pool of sessions.</description>
   </property>
 
   <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
+    <name>hive.txn.manager</name>
+    <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+    <description>Select the class to do transaction management. The default DummyTxnManager does no transactions and retains the legacy behavior.</description>
   </property>
 
   <property>
-    <name>hive.enforce.sortmergebucketmapjoin</name>
-    <value>true</value>
+    <name>hive.txn.timeout</name>
+    <value>300</value>
+    <description>Time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds.</description>
   </property>
 
   <property>
-    <name>hive.convert.join.bucket.mapjoin.tez</name>
-    <value>false</value>
+    <name>hive.txn.max.open.batch</name>
+    <value>1000</value>
+    <description>Maximum number of transactions that can be fetched in one call to open_txns(). Increasing this will decrease the number of delta files created when streaming data into Hive. But it will also increase the number of open transactions at any given time, possibly impacting read performance.</description>
   </property>
 
   <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
+    <name>hive.compactor.initiator.on</name>
+    <value>false</value>
+    <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the thrift metastore this should only be set to true for one of them.</description>
   </property>
 
   <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
+    <name>hive.compactor.worker.threads</name>
+    <value>0</value>
+    <description>Number of compactor worker threads to run on this metastore instance. Can be different values on different metastore instances.</description>
   </property>
 
   <property>
-    <name>hive.server2.tez.sessions.per.default.queue</name>
-    <value>1</value>
+    <name>hive.compactor.worker.timeout</name>
+    <value>86400L</value>
+    <description>Time, in seconds, before a given compaction in working state is declared a failure and returned to the initiated state.</description>
   </property>
 
   <property>
-    <name>hive.server2.tez.initialize.default.sessions</name>
-    <value>false</value>
+    <name>hive.compactor.check.interval</name>
+    <value>300L</value>
+    <description>Time in seconds between checks to see if any partitions need to be compacted. This should be kept high because each check for compaction requires many calls against the NameNode.</description>
   </property>
 
   <property>
-    <name>hive.server2.tez.default.queues</name>
-    <value>default</value>
+    <name>hive.compactor.delta.num.threshold</name>
+    <value>10</value>
+    <description>Number of delta files that must exist in a directory before the compactor will attempt a minor compaction.</description>
   </property>
 
   <property>
-    <name>hive.stats.dbclass</name>
-    <value>fs</value>
+    <name>hive.compactor.delta.pct.threshold</name>
+    <value>0.1f</value>
+    <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
   </property>
 
   <property>
-    <name>hive.compute.query.using.stats</name>
-    <value>true</value>
+    <name>hive.compactor.abortedtxn.threshold</name>
+    <value>1000</value>
+    <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
   </property>
 
-
   <property>
-    <name>hive.querylog.location</name>
-    <value>c:\hadoop\logs\hive</value>
+    <name>datanucleus.cache.level2.type</name>
+    <value>none</value>
+    <description>Determines the caching mechanism the DataNucleus L2 cache will use. It is strongly recommended to use the default value of 'none' as other values may cause consistency errors in Hive.</description>
   </property>
 
   <property>
-    <name>hive.log.dir</name>
-    <value>c:\hadoop\logs\hive</value>
+    <name>hive.server2.authentication.spnego.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+    <description>
+      The SPNEGO service principal used by HiveServer2 when Kerberos security is enabled and HTTP transport mode is used.
+    </description>
   </property>
 
   <property>
-    <name>hive.stats.autogather</name>
-    <value>true</value>
+    <name>hive.server2.authentication.spnego.keytab</name>
+    <value>/etc/security/keytabs/spnego.service.keytab</value>
+    <description>
+      The SPNEGO service keytab used by HiveServer2 when Kerberos security is enabled and HTTP transport mode is used.
+    </description>
   </property>
 
-  <property>
-    <name>hive.execution.engine</name>
-    <value>mr</value>
-  </property>
 </configuration>
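
A quick sanity check on the Tez sizing pair above: hive.tez.java.opts is documented as
generally 80% of hive.tez.container.size, and the new defaults agree:

    682 MB x 0.80 = 545.6 MB  ->  -Xmx545m

If hive.tez.container.size is raised, the -Xmx value in hive.tez.java.opts should be
scaled along with it.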

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-env.xml
new file mode 100644
index 0000000..cd0cb75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-env.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- webhcat-env.cmd -->
+  <property>
+    <name>content</name>
+    <description>webhcat-env.cmd content</description>
+    <value>
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml
index bae9712..7b95ed8 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/configuration/webhcat-site.xml
@@ -21,57 +21,32 @@ limitations under the License.
 <!-- Edit templeton-site.xml to change settings for your local -->
 <!-- install. -->
 
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
+<configuration supports_final="true">
 
   <property>
     <name>templeton.jar</name>
-    <value>c:\hdp\hive\hcatalog\share\webhcat\svr\lib\hive-webhca.jar</value>
+    <value>c:\hdp\hive\hcatalog\share\webhcat\svr\lib\hive-webhcat.jar</value>
     <description>The path to the Templeton jar file.</description>
   </property>
 
   <property>
-    <name>templeton.override.enabled</name>
-    <value>false</value>
-    <description>
-      Enable the override path in templeton.override.jars
-    </description>
-  </property>
-
-  <property>
     <name>templeton.hcat</name>
-    <value>${env.HCAT_HOME}/bin/hcat.py</value>
+    <value>${env.HCAT_HOME}\bin\hcat.py</value>
     <description>The path to the hcatalog executable.</description>
   </property>
 
   <property>
     <name>templeton.hadoop</name>
-    <value>${env.HADOOP_HOME}/bin/hadoop.cmd</value>
+    <value>${env.HADOOP_HOME}\bin\hadoop.cmd</value>
     <description>The path to the Hadoop executable.</description>
   </property>
 
   <property>
-    <name>templeton.exec.envs</name>
-    <value>HADOOP_HOME,JAVA_HOME,HIVE_HOME,TEMP,HADOOP_BIN_PATH,PATH,SystemRoot,TEZ_CLASSPATH</value>
-    <description>The environment variables passed through to exec.</description>
-  </property>
-
-  <property>
     <name>templeton.streaming.jar</name>
     <value>file:///c:/hdp/hadoop/share/hadoop/tools/lib/hadoop-streaming.jar</value>
     <description>The hdfs path to the Hadoop streaming jar file.</description>
   </property>
 
-  <property>
-    <name>templeton.hive.properties</name>
-    <value>hive.metastore.local=false,hive.metastore.uris=thrift://WIN-QS1HDPKHRAM:9083</value>
-    <description>Properties to set when running hive.</description>
-  </property>
 
   <property>
     <name>templeton.libjars</name>
@@ -87,23 +62,7 @@ limitations under the License.
 
   <property>
     <name>templeton.hive.path</name>
-    <value>${env.HIVE_HOME}/bin/hive.cmd</value>
+    <value>${env.HIVE_HOME}\bin\hive.cmd</value>
     <description>The path to the Hive executable.</description>
   </property>
-
-
-  <property>
-    <name>templeton.hadoop.queue.name</name>
-    <value>joblauncher</value>
-  </property>
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
-  </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml
index af45930..c601a94 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HIVE/metainfo.xml
@@ -20,147 +20,13 @@
   <services>
     <service>
       <name>HIVE</name>
-      <displayName>Hive</displayName>
-      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-      <version>0.12.0.2.0</version>
+      <extends>common-services/HIVE/0.12.0.2.0</extends>
+      <version>0.13.0.2.1.1.0</version>
       <components>
-
-        <component>
-          <name>HIVE_METASTORE</name>
-          <displayName>Hive Metastore</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>HIVE/HIVE_SERVER</co-locate>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/hive_metastore.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_SERVER</name>
-          <displayName>HiveServer2</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HIVE/HIVE_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>TEZ/TEZ_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/hive_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-        <component>
-          <name>WEBHCAT_SERVER</name>
-          <displayName>WebHCat Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <clientsToUpdateConfigs>
-            <client>HCAT</client>
-          </clientsToUpdateConfigs>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/webhcat_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-        </component>
-        <component>
-          <name>MYSQL_SERVER</name>
-          <displayName>MySQL Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/mysql_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
         <component>
           <name>HIVE_CLIENT</name>
-          <displayName>Hive Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/hive_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
           <configFiles>
             <configFile>
-              <type>xml</type>
-              <fileName>hive-site.xml</fileName>
-              <dictionaryName>hive-site</dictionaryName>
-            </configFile>
-            <configFile>
               <type>env</type>
               <fileName>hive-env.cmd</fileName>
               <dictionaryName>hive-env</dictionaryName>
@@ -169,13 +35,6 @@
         </component>
         <component>
           <name>HCAT</name>
-          <displayName>HCat Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
           <configFiles>
             <configFile>
               <type>env</type>
@@ -184,27 +43,7 @@
             </configFile>
           </configFiles>
         </component>
-
       </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>1200</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>YARN</service>
-        <service>TEZ</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>hive-env</config-type>
-        <config-type>webhcat-site</config-type>
-      </configuration-dependencies>
     </service>
-
   </services>
 </metainfo>
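
As with HDFS above, the HIVE service now extends its common-services definition, and the
component entries that remain are pure overrides: a component that lists only its name
plus a configFiles block keeps the inherited category, cardinality and command scripts,
swapping in just the Windows config file. Sketched from the HIVE_CLIENT entry above:

    <component>
      <name>HIVE_CLIENT</name>
      <!-- category, cardinality and commandScript are inherited from
           common-services/HIVE/0.12.0.2.0 -->
      <configFiles>
        <configFile>
          <type>env</type>
          <fileName>hive-env.cmd</fileName>
          <dictionaryName>hive-env</dictionaryName>
        </configFile>
      </configFiles>
    </component>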

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
index 423db73..4f690b5 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-env.xml
@@ -23,8 +23,7 @@
 <configuration>
   <property>
     <name>oozie_user</name>
-    <value>oozie</value>
-    <description>Oozie User.</description>
+    <deleted>true</deleted>
   </property>
   <property>
     <name>oozie_hostname</name>
@@ -39,32 +38,22 @@
     <description>Oozie Server Database.</description>
   </property>
   <property>
-    <name>oozie_derby_database</name>
-    <value>Derby</value>
-    <description>Oozie Derby Database</description>
-  </property>
-  <property>
     <name>oozie_data_dir</name>
-    <value>/hadoop/oozie/data</value>
+    <value>c:\hadoop\oozie\data</value>
     <description>Data directory in which the Oozie DB exists</description>
   </property>
   <property>
     <name>oozie_log_dir</name>
-    <value>/var/log/oozie</value>
+    <value>c:\hadoop\logs\oozie</value>
     <description>Directory for oozie logs</description>
   </property>
   <property>
     <name>oozie_pid_dir</name>
-    <value>/var/run/oozie</value>
+    <value>c:\hadoop\run\oozie</value>
     <description>Directory in which the pid files for oozie reside.</description>
   </property>
-  <property>
-    <name>oozie_admin_port</name>
-    <value>11001</value>
-    <description>The admin port Oozie server runs.</description>
-  </property>
 
-  <!-- oozie-env.sh -->
+  <!-- oozie-env.cmd -->
   <property>
     <name>content</name>
     <description>oozie-env.cmd content</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-log4j.xml
deleted file mode 100644
index cb77566..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-log4j.xml
+++ /dev/null
@@ -1,96 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <value>
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. See accompanying LICENSE file.
-#
-
-# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
-# XLogService sets its value to '${oozie.home}/logs'
-
-log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
-log4j.appender.oozie.Append=true
-log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
-log4j.appender.oozieops.Append=true
-log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
-log4j.appender.oozieinstrumentation.Append=true
-log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
-log4j.appender.oozieaudit.Append=true
-log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
-log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
-log4j.appender.openjpa.Append=true
-log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
-log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.logger.openjpa=INFO, openjpa
-log4j.logger.oozieops=INFO, oozieops
-log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
-log4j.logger.oozieaudit=ALL, oozieaudit
-log4j.logger.org.apache.oozie=INFO, oozie
-log4j.logger.org.apache.hadoop=WARN, oozie
-log4j.logger.org.mortbay=WARN, oozie
-log4j.logger.org.hsqldb=WARN, oozie
-log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
index 2051d01..739d59b 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/configuration/oozie-site.xml
@@ -16,487 +16,87 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<configuration  supports_final="true">
-
-    <!--
-        Refer to the oozie-default.xml file for the complete list of
-        Oozie configuration properties and their default values.
-    -->
-
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>
-            shell-action-0.1.xsd,shell-action-0.2.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,
-            hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,
-            sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,
-            oozie-sla-0.1.xsd,oozie-sla-0.2.xsd
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.system.id</name>
-        <value>oozie-${user.name}</value>
-        <description>
-            The Oozie system ID.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.systemmode</name>
-        <value>NORMAL</value>
-        <description>
-            System mode for  Oozie at startup.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.AuthorizationService.security.enabled</name>
-        <value>true</value>
-        <description>
-            Specifies whether security (user name/admin role) is enabled or not.
-            If disabled any user can manage Oozie system and manage any job.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.PurgeService.older.than</name>
-        <value>30</value>
-        <description>
-            Jobs older than this value, in days, will be purged by the PurgeService.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.PurgeService.purge.interval</name>
-        <value>3600</value>
-        <description>
-            Interval at which the purge service will run, in seconds.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.CallableQueueService.queue.size</name>
-        <value>10000</value>
-        <description>Max callable queue size</description>
-    </property>
-
-    <property>
-        <name>oozie.service.CallableQueueService.threads</name>
-        <value>10</value>
-        <description>Number of threads used for executing callables</description>
-    </property>
-
-    <property>
-        <name>oozie.service.CallableQueueService.callable.concurrency</name>
-        <value>3</value>
-        <description>
-            Maximum concurrency for a given callable type.
-            Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).
-            Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-            All commands that use action executors (action-start, action-end, action-kill and action-check) use
-            the action type as the callable type.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.coord.normal.default.timeout</name>
-        <value>120</value>
-        <description>Default timeout for a coordinator action input check (in minutes)
-            for normal job. -1 means infinite timeout
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.db.schema.name</name>
-        <value>oozie</value>
-        <description>
-            Oozie DataBase Name
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>true</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.
-            If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>com.microsoft.sqlserver.jdbc.SQLServerDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value></value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>oozie</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value>oozie</value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,
-                       if empty Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.kerberos.enabled</name>
-        <value>false</value>
-        <description>
-            Indicates if Oozie is configured to use Kerberos.
-        </description>
-    </property>
-
-    <property>
-        <name>local.realm</name>
-        <value>LOCALHOST</value>
-        <description>
-            Kerberos Realm used by Oozie and Hadoop. Using 'local.realm' to be aligned with Hadoop configuration
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.keytab.file</name>
-        <value>${user.home}/oozie.keytab</value>
-        <description>
-            Location of the Oozie user keytab file.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.kerberos.principal</name>
-        <value>${user.name}/localhost@${local.realm}</value>
-        <description>
-            Kerberos principal for Oozie service.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-        <value> </value>
-        <description>
-            Whitelisted job tracker for Oozie service.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-        <value> </value>
-        <description>
-            Whitelisted job tracker for Oozie service.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-        <value>*=c:\hdp\hadoop\etc\hadoop</value>
-        <description>
-            Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-            the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-            used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-            the relevant Hadoop *-site.xml files. If the path is relative is looked within
-            the Oozie configuration directory; though the path can be absolute (i.e. to point
-            to Hadoop client conf/ directories in the local filesystem.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.WorkflowAppService.system.libpath</name>
-        <value>/user/${user.name}/share/lib</value>
-        <description>
-            System library path to use for workflow applications.
-            This path is added to workflow application if their job properties sets
-            the property 'oozie.use.system.libpath' to true.
-        </description>
-    </property>
-
-    <property>
-        <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-        <value>false</value>
-        <description>
-            If set to true, submissions of MapReduce and Pig jobs will include
-            automatically the system library path, thus not requiring users to
-            specify where the Pig JAR files are. Instead, the ones from the system
-            library path are used.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.type</name>
-        <value>simple</value>
-        <description>
-            Defines authentication used for Oozie HTTP endpoint.
-            Supported values are: simple | basic | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.token.validity</name>
-        <value>36000</value>
-        <description>
-            Indicates how long (in seconds) an authentication token is valid before it has
-            to be renewed.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.signature.secret</name>
-        <value>oozie</value>
-        <description>
-            The signature secret for signing the authentication tokens.
-            If not set, a random secret is generated at startup time.
-            For authentication to work correctly across multiple hosts,
-            the secret must be the same across all the hosts.
-        </description>
-    </property>
-
-    <!--<property>
-      <name>oozie.authentication.cookie.domain</name>
-      <value></value>
-      <description>
-        The domain to use for the HTTP cookie that stores the authentication token.
-        For authentication to work correctly across multiple hosts,
-        the domain must be correctly set.
-      </description>
-    </property>-->
-
-    <property>
-        <name>oozie.authentication.simple.anonymous.allowed</name>
-        <value>true</value>
-        <description>
-            Indicates if anonymous requests are allowed.
-            This setting is meaningful only when using 'simple' authentication.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.kerberos.principal</name>
-        <value>HTTP/localhost@${local.realm}</value>
-        <description>
-            Indicates the Kerberos principal to be used for HTTP endpoint.
-            The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.kerberos.keytab</name>
-        <value>${oozie.service.HadoopAccessorService.keytab.file}</value>
-        <description>
-            Location of the keytab file with the credentials for the principal.
-            It refers to the same keytab file Oozie uses for its Kerberos credentials for Hadoop.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.authentication.kerberos.name.rules</name>
-        <value>DEFAULT</value>
-        <description>
-            The Kerberos name rules are used to resolve Kerberos principal names;
-            refer to Hadoop's KerberosName for more details.
-        </description>
-    </property>
-
-    <!-- Proxyuser Configuration -->
-
-    <!--
-
-    <property>
-        <name>oozie.service.ProxyUserService.proxyuser.#USER#.hosts</name>
-        <value>*</value>
-        <description>
-            List of hosts from which the '#USER#' user is allowed to perform
-            'doAs' operations.
-
-            The '#USER#' must be replaced with the username of the user who is
-            allowed to perform 'doAs' operations.
-
-            The value can be the '*' wildcard or a list of hostnames.
-
-            For multiple users copy this property and replace the user name
-            in the property name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.ProxyUserService.proxyuser.#USER#.groups</name>
-        <value>*</value>
-        <description>
-            List of groups whose users the '#USER#' user is allowed to
-            impersonate when performing 'doAs' operations.
-
-            The '#USER#' must be replaced with the username of the user who is
-            allowed to perform 'doAs' operations.
-
-            The value can be the '*' wildcard or a list of groups.
-
-            For multiple users copy this property and replace the user name
-            in the property name.
-        </description>
-    </property>
-
-    -->
-
-
-  <property>
-    <name>oozie.service.coord.push.check.requeue.interval</name>
-    <value>30000</value>
-  </property>
-
-  <property>
-    <name>oozie.services.ext</name>
-    <value>org.apache.oozie.service.JMSAccessorService,
-                org.apache.oozie.service.PartitionDependencyManagerService,
-                org.apache.oozie.service.HCatAccessorService</value>
-  </property>
+<configuration supports_final="true">
 
+  <!--
+      Refer to the oozie-default.xml file for the complete list of
+      Oozie configuration properties and their default values.
+  -->
   <property>
-    <name>oozie.credentials.credentialclasses</name>
-    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>com.microsoft.sqlserver.jdbc.SQLServerDriver</value>
+    <description>
+      JDBC driver class.
+    </description>
   </property>
-
   <property>
-    <name>oozie.service.URIHandlerService.uri.handlers</name>
-    <value>org.apache.oozie.dependency.FSURIHandler,
-                org.apache.oozie.dependency.HCatURIHandler</value>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    <value></value>
+    <description>
+      JDBC URL.
+    </description>
   </property>
-
   <property>
-    <name>oozie.service.ELService.ext.functions.coord-job-submit-data</name>
-    <value>now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
-                today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
-                yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
-                currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
-                lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
-                currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
-                lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
-                dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,
-                instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,
-                formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-                dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,
-                user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*=c:\hdp\hadoop\etc\hadoop</value>
+    <description>
+      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up
+      within the Oozie configuration directory; the path can also be absolute
+      (e.g. to point to Hadoop client conf/ directories on the local filesystem).
+    </description>
   </property>
 
+  <!-- New/Updated properties for 2.1 -->
   <property>
-    <name>oozie.service.ProxyUserService.proxyuser.hadoop.hosts</name>
-    <value>*</value>
+    <name>oozie.services</name>
+    <value>
+      org.apache.oozie.service.SchedulerService,
+      org.apache.oozie.service.InstrumentationService,
+      org.apache.oozie.service.CallableQueueService,
+      org.apache.oozie.service.UUIDService,
+      org.apache.oozie.service.ELService,
+      org.apache.oozie.service.AuthorizationService,
+      org.apache.oozie.service.UserGroupInformationService,
+      org.apache.oozie.service.HadoopAccessorService,
+      org.apache.oozie.service.URIHandlerService,
+      org.apache.oozie.service.MemoryLocksService,
+      org.apache.oozie.service.DagXLogInfoService,
+      org.apache.oozie.service.SchemaService,
+      org.apache.oozie.service.LiteWorkflowAppService,
+      org.apache.oozie.service.JPAService,
+      org.apache.oozie.service.StoreService,
+      org.apache.oozie.service.CoordinatorStoreService,
+      org.apache.oozie.service.SLAStoreService,
+      org.apache.oozie.service.DBLiteWorkflowStoreService,
+      org.apache.oozie.service.CallbackService,
+      org.apache.oozie.service.ActionService,
+      org.apache.oozie.service.ActionCheckerService,
+      org.apache.oozie.service.RecoveryService,
+      org.apache.oozie.service.PurgeService,
+      org.apache.oozie.service.CoordinatorEngineService,
+      org.apache.oozie.service.BundleEngineService,
+      org.apache.oozie.service.DagEngineService,
+      org.apache.oozie.service.CoordMaterializeTriggerService,
+      org.apache.oozie.service.StatusTransitService,
+      org.apache.oozie.service.PauseTransitService,
+      org.apache.oozie.service.GroupsService,
+      org.apache.oozie.service.ProxyUserService,
+      org.apache.oozie.service.XLogStreamingService,
+      org.apache.oozie.service.JobsConcurrencyService
+    </value>
+    <description>List of Oozie services</description>
   </property>
 
   <property>
-    <name>oozie.service.ELService.ext.functions.coord-sla-submit</name>
-    <value>instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,
-                user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-sla-create</name>
-    <value>instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,
-                user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
-  </property>
-
-  <property>
-    <name>oozie.service.ProxyUserService.proxyuser.hadoop.groups</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-create</name>
-    <value>now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
-                today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
-                yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
-                currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
-                lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
-                currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
-                lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
-                latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-                future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-                formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-                user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-job-submit-instances</name>
-    <value>now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
-                today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
-                yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
-                currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
-                lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
-                currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
-                lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
-                formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-                latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-                future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo</value>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-start</name>
-    <value>now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
-                today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
-                yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
-                currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
-                lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
-                currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
-                lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
-                latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,
-                future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,
-                dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,
-                instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,
-                dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,
-                formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,
-                user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-create-inst</name>
-    <value>now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,
-                today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,
-                yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,
-                currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,
-                lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,
-                currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,
-                lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,
-                latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-                future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-                formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-                user=org.apache.oozie.coord.CoordELFunctions#coord_user</value>
+    <name>oozie.services.ext</name>
+    <value>org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService
+    </value>
+    <description>
+      Used to add to, or replace, services defined in 'oozie.services' with
+      custom implementations. Class names must be separated by commas.
+    </description>
   </property>
 </configuration>
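
Two of the patterns in this file benefit from a concrete reading. The
AUTHORITY=HADOOP_CONF_DIR value of
'oozie.service.HadoopAccessorService.hadoop.configurations' accepts explicit
HOST:PORT authorities alongside the '*' wildcard; as a minimal sketch (the
host name and the second conf directory below are hypothetical, not part of
this commit), a cluster keeping a separate client configuration for one
NameNode could map it like this:

  <property>
    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
    <value>*=c:\hdp\hadoop\etc\hadoop,nn2.example.com:8020=c:\hdp\conf-nn2</value>
  </property>

Likewise, the commented-out ProxyUserService template is instantiated by
substituting a real user name for '#USER#', exactly as the concrete 'hadoop'
entries removed by this hunk do; for a hypothetical 'falcon' user the pair
would read:

  <property>
    <name>oozie.service.ProxyUserService.proxyuser.falcon.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>oozie.service.ProxyUserService.proxyuser.falcon.groups</name>
    <value>*</value>
  </property>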

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml
index 347954b..b2b394c 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml
@@ -20,107 +20,20 @@
   <services>
     <service>
       <name>OOZIE</name>
-      <displayName>Oozie</displayName>
-      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
-      </comment>
-      <version>4.0.0.2.0</version>
+      <extends>common-services/OOZIE/4.0.0.2.0</extends>
+      <version>4.0.0.2.1.1.0</version>
       <components>
         <component>
-          <name>OOZIE_SERVER</name>
-          <displayName>Oozie Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/oozie_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
           <name>OOZIE_CLIENT</name>
-          <displayName>Oozie Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/oozie_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
           <configFiles>
             <configFile>
-              <type>xml</type>
-              <fileName>oozie-site.xml</fileName>
-              <dictionaryName>oozie-site</dictionaryName>
-            </configFile>
-            <configFile>
               <type>env</type>
               <fileName>oozie-env.cmd</fileName>
               <dictionaryName>oozie-env</dictionaryName>
             </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>oozie-log4j.properties</fileName>
-              <dictionaryName>oozie-log4j</dictionaryName>
-            </configFile>
           </configFiles>
         </component>
       </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>oozie-site</config-type>
-        <config-type>oozie-env</config-type>
-        <config-type>oozie-log4j</config-type>
-        <config-type>yarn-site</config-type>
-      </configuration-dependencies>
     </service>
   </services>
 </metainfo>
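
The <extends> element is what lets this metainfo shrink to a delta: the stack
service inherits its full definition from common-services/OOZIE/4.0.0.2.0 and
re-declares only what differs (the version, plus an OOZIE_CLIENT config-file
list trimmed to the Windows-specific oozie-env.cmd). As a sketch of the same
pattern applied to another service (the service name and version paths below
are illustrative assumptions, not taken from this commit), a minimal stack
override looks like:

  <metainfo>
    <schemaVersion>2.0</schemaVersion>
    <services>
      <service>
        <name>PIG</name>
        <!-- Inherit the complete definition from common-services... -->
        <extends>common-services/PIG/0.12.0.2.0</extends>
        <!-- ...and override only the stack-specific version string. -->
        <version>0.12.1.2.1.1.0</version>
      </service>
    </services>
  </metainfo>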