Posted to commits@ambari.apache.org by jl...@apache.org on 2014/12/31 06:44:42 UTC

[3/3] ambari git commit: AMBARI-8876: Common Services: Refactor HDPWIN 2.1 stack to use common services (Jayush Luniya)

AMBARI-8876: Common Services: Refactor HDPWIN 2.1 stack to use common services (Jayush Luniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/af6f6e87
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/af6f6e87
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/af6f6e87

Branch: refs/heads/trunk
Commit: af6f6e877209d5ab5d25fe2858259c234137eae5
Parents: 8567905
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Tue Dec 30 21:44:28 2014 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Tue Dec 30 21:44:28 2014 -0800

----------------------------------------------------------------------
 .../FALCON/configuration/falcon-env.xml         |  68 +-
 .../configuration/falcon-runtime.properties.xml |  47 --
 .../configuration/falcon-startup.properties.xml | 207 -------
 .../FALCON/configuration/oozie-site.xml         | 167 -----
 .../HDPWIN/2.1/services/FALCON/metainfo.xml     |  70 +--
 .../services/HBASE/configuration/hbase-env.xml  |  29 +-
 .../HBASE/configuration/hbase-policy.xml        |  53 --
 .../services/HBASE/configuration/hbase-site.xml | 292 +--------
 .../HDPWIN/2.1/services/HBASE/metainfo.xml      |  93 +--
 .../services/HDFS/configuration/core-site.xml   | 213 +------
 .../services/HDFS/configuration/hadoop-env.xml  | 119 ++++
 .../HDFS/configuration/hadoop-policy.xml        | 219 -------
 .../services/HDFS/configuration/hdfs-site.xml   | 274 ++-------
 .../HDPWIN/2.1/services/HDFS/metainfo.xml       | 128 +---
 .../services/HIVE/configuration/hcat-env.xml    |  31 +
 .../services/HIVE/configuration/hive-env.xml    |  42 +-
 .../services/HIVE/configuration/hive-site.xml   | 282 ++++-----
 .../services/HIVE/configuration/webhcat-env.xml |  31 +
 .../HIVE/configuration/webhcat-site.xml         |  51 +-
 .../HDPWIN/2.1/services/HIVE/metainfo.xml       | 165 +----
 .../services/OOZIE/configuration/oozie-env.xml  |  21 +-
 .../OOZIE/configuration/oozie-log4j.xml         |  96 ---
 .../services/OOZIE/configuration/oozie-site.xml | 540 +++-------------
 .../HDPWIN/2.1/services/OOZIE/metainfo.xml      |  91 +--
 .../services/PIG/configuration/pig-log4j.xml    |  61 --
 .../PIG/configuration/pig-properties.xml        | 262 --------
 .../stacks/HDPWIN/2.1/services/PIG/metainfo.xml |  39 +-
 .../services/SQOOP/configuration/sqoop-env.xml  |  10 +-
 .../HDPWIN/2.1/services/SQOOP/metainfo.xml      |  45 +-
 .../services/STORM/configuration/storm-env.xml  |  47 ++
 .../services/STORM/configuration/storm-site.xml | 616 +------------------
 .../HDPWIN/2.1/services/STORM/metainfo.xml      |  66 +-
 .../2.1/services/TEZ/configuration/tez-env.xml  |  36 ++
 .../2.1/services/TEZ/configuration/tez-site.xml | 193 +-----
 .../stacks/HDPWIN/2.1/services/TEZ/metainfo.xml |  28 +-
 .../HDPWIN/2.1/services/YARN/metainfo.xml       |   4 +-
 .../ZOOKEEPER/configuration/zoo.cfg.xml         |  22 +-
 .../ZOOKEEPER/configuration/zookeeper-env.xml   |  50 +-
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml | 100 ---
 .../HDPWIN/2.1/services/ZOOKEEPER/metainfo.xml  |  42 +-
 ambari-web/app/utils/validator.js               |   8 +-
 41 files changed, 641 insertions(+), 4317 deletions(-)
----------------------------------------------------------------------
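
In outline, each HDPWIN 2.1 service definition now inherits the shared common-services definition and keeps only its Windows-specific overrides (c:\ paths, *.cmd templates); inherited defaults that do not apply on Windows are dropped with a <deleted>true</deleted> marker. A minimal, abridged sketch of the pattern, using only element names that appear in this diff (not a complete metainfo.xml):

  <metainfo>
    <services>
      <service>
        <name>FALCON</name>
        <!-- inherit the full service definition from common-services -->
        <extends>common-services/FALCON/0.5.0.2.1</extends>
        <version>0.5.0.2.1.1.0</version>
        <!-- only stack-specific overrides remain, e.g. Windows config files -->
      </service>
    </services>
  </metainfo>

And in the overlay's configuration XML, an inherited property is removed rather than redefined:

  <property>
    <name>falcon_user</name>
    <!-- drop the inherited default instead of overriding it -->
    <deleted>true</deleted>
  </property>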


http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml
index 0a12051..6ececc6 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-env.xml
@@ -21,89 +21,35 @@
 <configuration>
   <property>
     <name>falcon_user</name>
-    <value>falcon</value>
-    <description>Falcon user.</description>
-  </property>
-  <property>
-    <name>falcon_port</name>
-    <value>15000</value>
-    <description>Port the Falcon Server listens on.</description>
+    <deleted>true</deleted>
   </property>
   <property>
     <name>falcon_log_dir</name>
-    <value>/var/log/falcon</value>
+    <value>c:\hadoop\logs\falcon</value>
     <description>Falcon log directory.</description>
   </property>
   <property>
     <name>falcon_pid_dir</name>
-    <value>/var/run/falcon</value>
+    <value>c:\hadoop\run\falcon</value>
     <description>Falcon pid-file directory.</description>
   </property>
   <property>
     <name>falcon_local_dir</name>
-    <value>/hadoop/falcon</value>
+    <value>c:\hadoop\falcon</value>
     <description>Directory where Falcon data, such as activemq data, is stored.</description>
   </property>
   <!--embeddedmq properties-->
   <property>
     <name>falcon.embeddedmq.data</name>
-    <value>/hadoop/falcon/embeddedmq/data</value>
+    <value>c:\hadoop\falcon\embeddedmq\data</value>
     <description>Directory in which embeddedmq data is stored.</description>
   </property>
-  <property>
-    <name>falcon.embeddedmq</name>
-    <value>true</value>
-    <description>Whether embeddedmq is enabled or not.</description>
-  </property>
-  <property>
-    <name>falcon.emeddedmq.port</name>
-    <value>61616</value>
-    <description>Port that embeddedmq will listen on.</description>
-  </property>
 
-  <!-- falcon-env.sh -->
+  <!-- falcon-env.cmd -->
   <property>
     <name>content</name>
-    <description>falcon-env.sh content</description>
+    <description>This is the jinja template for falcon-env.cmd file</description>
     <value>
-# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path
-export JAVA_HOME={{java_home}}
-
-# any additional java opts you want to set. This will apply to both client and server operations
-#export FALCON_OPTS=
-
-# any additional java opts that you want to set for client only
-#export FALCON_CLIENT_OPTS=
-
-# java heap size we want to set for the client. Default is 1024MB
-#export FALCON_CLIENT_HEAP=
-
-# any additional opts you want to set for prisim service.
-#export FALCON_PRISM_OPTS=
-
-# java heap size we want to set for the prisim service. Default is 1024MB
-#export FALCON_PRISM_HEAP=
-
-# any additional opts you want to set for falcon service.
-export FALCON_SERVER_OPTS="-Dfalcon.embeddedmq={{falcon_embeddedmq_enabled}} -Dfalcon.emeddedmq.port={{falcon_emeddedmq_port}}"
-
-# java heap size we want to set for the falcon server. Default is 1024MB
-#export FALCON_SERVER_HEAP=
-
-# What is is considered as falcon home dir. Default is the base locaion of the installed software
-#export FALCON_HOME_DIR=
-
-# Where log files are stored. Defatult is logs directory under the base install location
-export FALCON_LOG_DIR={{falcon_log_dir}}
-
-# Where pid files are stored. Defatult is logs directory under the base install location
-export FALCON_PID_DIR={{falcon_pid_dir}}
-
-# where the falcon active mq data is stored. Defatult is logs/data directory under the base install location
-export FALCON_DATA_DIR={{falcon_embeddedmq_data}}
-
-# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.
-#export FALCON_EXPANDED_WEBAPP_DIR=
     </value>
   </property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-runtime.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-runtime.properties.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-runtime.properties.xml
deleted file mode 100644
index 94c8755..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-runtime.properties.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false">
-  <property>
-    <name>*.domain</name>
-    <value>${falcon.app.type}</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.log.cleanup.frequency.minutes.retention</name>
-    <value>hours(6)</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.log.cleanup.frequency.hours.retention</name>
-    <value>minutes(1)</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.log.cleanup.frequency.days.retention</name>
-    <value>days(7)</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.log.cleanup.frequency.months.retention</name>
-    <value>months(3)</value>
-    <description></description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-startup.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-startup.properties.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-startup.properties.xml
deleted file mode 100644
index 7459429..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/falcon-startup.properties.xml
+++ /dev/null
@@ -1,207 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false">
-  <!--advanced properties-->
-  <property>
-    <name>*.workflow.engine.impl</name>
-    <value>org.apache.falcon.workflow.engine.OozieWorkflowEngine</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.oozie.process.workflow.builder</name>
-    <value>org.apache.falcon.workflow.OozieProcessWorkflowBuilder</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.oozie.feed.workflow.builder</name>
-    <value>org.apache.falcon.workflow.OozieFeedWorkflowBuilder</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.SchedulableEntityManager.impl</name>
-    <value>org.apache.falcon.resource.SchedulableEntityManager</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.ConfigSyncService.impl</name>
-    <value>org.apache.falcon.resource.ConfigSyncService</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.ProcessInstanceManager.impl</name>
-    <value>org.apache.falcon.resource.InstanceManager</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.catalog.service.impl</name>
-    <value>org.apache.falcon.catalog.HiveCatalogService</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.application.services</name>
-    <value>org.apache.falcon.security.AuthenticationInitializationService,\
-      org.apache.falcon.service.ProcessSubscriberService,\
-      org.apache.falcon.entity.store.ConfigurationStore,\
-      org.apache.falcon.rerun.service.RetryService,\
-      org.apache.falcon.rerun.service.LateRunService,\
-      org.apache.falcon.service.LogCleanupService
-    </value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.configstore.listeners</name>
-    <value>org.apache.falcon.entity.v0.EntityGraph,\
-      org.apache.falcon.entity.ColoClusterRelation,\
-      org.apache.falcon.group.FeedGroupMap,\
-      org.apache.falcon.service.SharedLibraryHostingService
-    </value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.broker.impl.class</name>
-    <value>org.apache.activemq.ActiveMQConnectionFactory</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.shared.libs</name>
-    <value>activemq-core,ant,geronimo-j2ee-management,hadoop-distcp,jms,json-simple,oozie-client,spring-jms</value>
-    <description></description>
-  </property>
-  <!--common properties-->
-  <property>
-    <name>*.domain</name>
-    <value>${falcon.app.type}</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.config.store.uri</name>
-    <value>file:///hadoop/falcon/store</value>
-    <description>Location to store user entity configurations</description>
-  </property>
-  <property>
-    <name>*.system.lib.location</name>
-    <value>${falcon.home}/server/webapp/${falcon.app.type}/WEB-INF/lib</value>
-    <description>Location of libraries that is shipped to Hadoop</description>
-  </property>
-  <property>
-    <name>*.retry.recorder.path</name>
-    <value>${falcon.log.dir}/retry</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.cleanup.service.frequency</name>
-    <value>days(1)</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.broker.url</name>
-    <value>tcp://localhost:61616</value>
-    <description>Default Active MQ url</description>
-  </property>
-  <property>
-    <name>*.broker.ttlInMins</name>
-    <value>4320</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.entity.topic</name>
-    <value>FALCON.ENTITY.TOPIC</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.max.retry.failure.count</name>
-    <value>1</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.internal.queue.size</name>
-    <value>1000</value>
-    <description></description>
-  </property>
-  <!--properties without default values-->
-  <property>
-    <name>*.falcon.http.authentication.cookie.domain</name>
-    <value>EXAMPLE.COM</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.blacklisted.users</name>
-    <value></value>
-    <description>Comma separated list of black listed users</description>
-  </property>
-  <!--authentication properties-->
-  <property>
-    <name>*.falcon.authentication.type</name>
-    <value>simple</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.type</name>
-    <value>simple</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.token.validity</name>
-    <value>36000</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.signature.secret</name>
-    <value>falcon</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.simple.anonymous.allowed</name>
-    <value>true</value>
-    <description>Indicates if anonymous requests are allowed when using 'simple' authentication</description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.kerberos.name.rules</name>
-    <value>DEFAULT</value>
-    <description>The kerberos names rules is to resolve kerberos principal names, refer to Hadoop's KerberosName for more details.</description>
-  </property>
-  <!--kerberos params, must be set during security enabling-->
-  <property>
-    <name>*.falcon.service.authentication.kerberos.principal</name>
-    <value>falcon/_HOST@EXAMPLE.COM</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.falcon.service.authentication.kerberos.keytab</name>
-    <value>/etc/security/keytabs/falcon.service.keytab</value>
-    <description></description>
-  </property>
-  <property>
-    <name>*.dfs.namenode.kerberos.principal</name>
-    <value>nn/_HOST@EXAMPLE.COM</value>
-    <description>name node principal to talk to config store</description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <description>Indicates the Kerberos principal to be used for HTTP endpoint</description>
-  </property>
-  <property>
-    <name>*.falcon.http.authentication.kerberos.keytab</name>
-    <value>/etc/security/keytabs/spnego.service.keytab</value>
-    <description>Location of the keytab file with the credentials for the HTTP principal</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/oozie-site.xml
deleted file mode 100644
index 4b0bf70..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/configuration/oozie-site.xml
+++ /dev/null
@@ -1,167 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration supports_final="true">
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-job-submit-instances</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo
-    </value>
-    <description>
-      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-      This property is a convenience property to add extensions to the built in executors without having to
-      include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-create-inst</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,
-      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>
-      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-      This property is a convenience property to add extensions to the built in executors without having to
-      include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-create</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
-      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>
-      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-      This property is a convenience property to add extensions to the built in executors without having to
-      include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-job-submit-data</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
-      dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,
-      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-      dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>
-      EL constant declarations, separated by commas, format is [PREFIX:]NAME=CLASS#CONSTANT.
-      This property is a convenience property to add extensions to the built in executors without having to
-      include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-start</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
-      latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,
-      future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,
-      dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,
-      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,
-      dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>
-      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-      This property is a convenience property to add extensions to the built in executors without having to
-      include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-sla-submit</name>
-    <value>
-      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>
-      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-sla-create</name>
-    <value>
-      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>
-      EL functions declarations, separated by commas, format is [PREFIX:]NAME=CLASS#METHOD.
-    </description>
-  </property>
-  <!--web ui should add following properties to oozie site accordingly to FALCON_USER-->
-  <!--<property>-->
-    <!--<name>oozie.service.ProxyUserService.proxyuser.#FALCON_USER#.hosts</name>-->
-    <!--<value>*</value>-->
-    <!--<description>Falcon proxyuser hosts</description>-->
-  <!--</property>-->
-
-  <!--<property>-->
-    <!--<name>oozie.service.ProxyUserService.proxyuser.#FALCON_USER#.groups</name>-->
-    <!--<value>*</value>-->
-    <!--<description>Falcon proxyuser groups</description>-->
-  <!--</property>-->
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml
index 7938777..14b4c82 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/FALCON/metainfo.xml
@@ -21,86 +21,20 @@
     <service>
       <name>FALCON</name>
       <displayName>Falcon</displayName>
-      <comment>Data management and processing platform</comment>
-      <version>0.5.0.2.1</version>
+      <extends>common-services/FALCON/0.5.0.2.1</extends>
+      <version>0.5.0.2.1.1.0</version>
       <components>
         <component>
           <name>FALCON_CLIENT</name>
-          <displayName>Falcon Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/falcon_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
           <configFiles>
             <configFile>
               <type>env</type>
               <fileName>falcon-env.cmd</fileName>
               <dictionaryName>falcon-env</dictionaryName>
             </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>runtime.properties</fileName>
-              <dictionaryName>falcon-runtime.properties</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>startup.properties</fileName>
-              <dictionaryName>falcon-startup.properties</dictionaryName>
-            </configFile>
           </configFiles>
         </component>
-        <component>
-          <name>FALCON_SERVER</name>
-          <displayName>Falcon Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>OOZIE/OOZIE_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>OOZIE/OOZIE_CLIENT</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/falcon_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
       </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>OOZIE</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>falcon-env</config-type>
-        <config-type>falcon-startup.properties</config-type>
-        <config-type>falcon-runtime.properties</config-type>
-      </configuration-dependencies>
-
-      <excluded-config-types>
-        <config-type>oozie-site</config-type>
-      </excluded-config-types>
-
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml
index fc2ed7f..642e746 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-env.xml
@@ -23,39 +23,18 @@
 <configuration>
   <property>
     <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
+    <value>c:\hadoop\logs\hbase</value>
     <description>Log Directories for HBase.</description>
   </property>
   <property>
     <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
+    <value>c:\hadoop\run\hbase</value>
     <description>Pid Directory for HBase.</description>
   </property>
+
   <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_xmn_max</name>
-    <value>512</value>
-    <description>HBase RegionServer maximum value for minimum heap size.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_xmn_ratio</name>
-    <value>0.2</value>
-    <description>HBase RegionServer minimum heap size is calculated as a percentage of max heap size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-   <property>
     <name>hbase_user</name>
-    <value>hbase</value>
-    <property-type>USER</property-type>
-    <description>HBase User Name.</description>
+    <deleted>true</deleted>
   </property>
 
   <!-- hbase-env.cmd -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index b0807b6..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="true">
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (ie.
-    clients talking to HRegionServers)
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (ie.
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster)
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml
index cbaaacd..d551d4d 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/configuration/hbase-site.xml
@@ -21,298 +21,16 @@
 -->
 <configuration supports_final="true">
   <property>
-    <name>hbase.rootdir</name>
-    <value>hdfs://localhost:8020/apps/hbase/data</value>
-    <description>The directory shared by region servers and into
-      which HBase persists.  The URL should be 'fully-qualified'
-      to include the filesystem scheme.  For example, to specify the
-      HDFS directory '/hbase' where the HDFS instance's namenode is
-      running at namenode.example.org on port 9000, set this value to:
-      hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-      into /tmp.  Change this configuration else all data will be lost
-      on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
+    <name>dfs.domain.socket.path</name>
+    <deleted>true</deleted>
   </property>
   <property>
-    <name>hbase.master.port</name>
-    <value>60000</value>
-    <description>The port the HBase Master should bind to.</description>
-  </property>
-  <property >
     <name>hbase.tmp.dir</name>
-    <value>${java.io.tmpdir}/hbase-${user.name}</value>
+    <value>c:\hadoop\temp\hbase</value>
     <description>Temporary directory on the local filesystem.
       Change this setting to point to a location more permanent
-      than '/tmp', the usual resolve for java.io.tmpdir, as the
-      '/tmp' directory is cleared on machine restart.</description>
-  </property>
-  <property>
-    <name>hbase.local.dir</name>
-    <value>${hbase.tmp.dir}/local</value>
-    <description>Directory on the local filesystem to be used as a local storage
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>60010</value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>60030</value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
+      than '/tmp' (The '/tmp' directory is often cleared on
+      machine restart).
     </description>
   </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>30</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-      Same property is used by the Master for count of master handlers.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>604800000</value>
-    <description>The time (in miliseconds) between 'major' compactions of all
-      HStoreFiles in a region.  Default: Set to 7 days.  Major compactions tend to
-      happen exactly when you need them least so enable them such that they run at
-      off-peak for your deploy; or, since this setting is on a periodicity that is
-      unlikely to match your loading, run the compactions via an external
-      invocation out of a cron job or some such.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.38</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
-      time hbase.hregion.flush.size bytes.  Useful preventing
-      runaway memstore during spikes in update traffic.  Without an
-      upper-bound, memstore fills such that when it flushes the
-      resultant flush files take a long time to compact or split, or
-      worse, we OOME
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-      Memstore will be flushed to disk if size of the memstore
-      exceeds this number of bytes.  Value is checked by a thread that runs
-      every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-      Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-      grown to exceed this value, the hosting HRegion is split in two.
-      Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-      on a scanner if it is not served from (local, client) memory. Higher
-      caching values will enable faster scanners but will eat up more memory
-      and some calls of next may take longer and longer times when the cache is empty.
-      Do not set this value such that the time between invocations is greater
-      than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>90000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-      instance. This is to set an upper boundary for a single entry saved in a
-      storage file. Since they cannot be split it helps avoiding that a region
-      cannot be split any further because the data is too large. It seems wise
-      to set this to a fraction of the maximum region size. Setting it to zero
-      or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-      If more than this number of HStoreFiles in any one HStore
-      (one HStoreFile is written per flush of memstore) then a compaction
-      is run to rewrite all HStoreFiles files as one.  Larger numbers
-      put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.flush.retries.number</name>
-    <value>120</value>
-    <description>
-      The number of times the region flush operation will be retried.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>10</value>
-    <description>
-      If more than this number of StoreFiles in any one Store
-      (one StoreFile is written per flush of MemStore) then updates are
-      blocked for this HRegion until a compaction is completed, or
-      until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.4</value>
-    <description>
-      Percentage of maximum heap (-Xmx setting) to allocate to block cache
-      used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-      Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-    <description>  Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
-      (no authentication), and 'kerberos'.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-      default on all tables. For any override coprocessor method, these classes
-      will be called in order. After implementing your own Coprocessor, just put
-      it in HBase's classpath and add the fully qualified class name here.
-      A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-      By default this is set to localhost for local and pseudo-distributed modes
-      of operation. For a fully-distributed setup, this should be set to a full
-      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-      this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>false</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-      This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-      with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).ยท
-      IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-      and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-      not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file path are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.defaults.for.version.skip</name>
-    <value>false</value>
-    <description>Disables version verification.</description>
-  </property>
-
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml
index 29dd01f..741f744 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/metainfo.xml
@@ -20,109 +20,20 @@
   <services>
     <service>
       <name>HBASE</name>
-      <displayName>HBase</displayName>
-      <comment>Non-relational distributed database and centralized service for configuration management &amp;
-        synchronization
-      </comment>
-      <version>0.96.0.2.0</version>
+      <extends>common-services/HBASE/0.96.0.2.0</extends>
+      <version>0.98.0.2.1.1.0</version>
       <components>
         <component>
-          <name>HBASE_MASTER</name>
-          <displayName>HBase Master</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HBASE/HBASE_MASTER</co-locate>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts\hbase_master.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts\hbase_master.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>HBASE_REGIONSERVER</name>
-          <displayName>RegionServer</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts\hbase_regionserver.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
           <name>HBASE_CLIENT</name>
-          <displayName>HBase Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts\hbase_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
           <configFiles>
             <configFile>
-              <type>xml</type>
-              <fileName>hbase-site.xml</fileName>
-              <dictionaryName>hbase-site</dictionaryName>
-            </configFile>
-            <configFile>
               <type>env</type>
               <fileName>hbase-env.cmd</fileName>
               <dictionaryName>hbase-env</dictionaryName>
             </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hbase-log4j</dictionaryName>
-            </configFile>
           </configFiles>
         </component>
       </components>
-
-      <commandScript>
-        <script>scripts\service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>HDFS</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>hbase-policy</config-type>
-        <config-type>hbase-site</config-type>
-        <config-type>hbase-env</config-type>
-        <config-type>hbase-log4j</config-type>
-      </configuration-dependencies>
-
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
index 2dbda14..2d406fc 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
@@ -1,202 +1,27 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!-- Put site-specific property overrides in this file. -->
-<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
-  <!-- i/o properties -->
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-  <!-- file system properties -->
-  <property>
-    <name>fs.defaultFS</name>
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes after which the checkpoint
-      gets deleted.  If zero, the trash feature is disabled.
-    </description>
-  </property>
-  <property>
-    <name>fs.trash.checkpoint.interval</name>
-    <value>0</value>
-    <description>Number of minutes between trash checkpoints.
-      Should be smaller or equal to fs.trash.interval. If zero,
-      the value is set to the value of fs.trash.interval.
-    </description>
-  </property>
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-    <description>The umask used when creating files and directories.
-      Can be in octal or in symbolic. Examples are: "022" (octal for
-      u=rwx,g=r-x,o=r-x in symbolic), or "u=rwx,g=rwx,o=" (symbolic
-      for 007 in octal).</description>
-  </property>
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-    </description>
-  </property>
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-    </description>
-  </property>
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>hadoop.http.staticuser.user</name>
-    <value>gopher</value>
-    <description>
-      The user name to filter as, on static web filters
-      while rendering content. An example use is the HDFS
-      web UI (user to be used for browsing files).
-    </description>
-  </property>
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of RM and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>simple</value>
-    <description>
-      Set the authentication for the cluster. Valid values are: simple or
-      kerberos.
-    </description>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>false</value>
-    <description>
-     Enable authorization for different protocols.
-  </description>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hadoop.groups</name>
-    <value>HadoopUsers</value>
-    <description>
-     Proxy group for Hadoop.
-  </description>
-  </property>
-  <property>
-    <name>hadoop.ssl.enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>hadoop.ssl.require.client.cert</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>hadoop.ssl.hostname.verifier</name>
-    <value>DEFAULT</value>
-  </property>
-  <property>
-    <name>hadoop.ssl.keystores.factory.class</name>
-    <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
-  </property>
-  <property>
-    <name>hadoop.ssl.server.conf</name>
-    <value>ssl-server.xml</value>
-  </property>
-  <property>
-    <name>hadoop.ssl.client.conf</name>
-    <value>ssl-client.xml</value>
-  </property>
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value>authentication</value>
-  </property>
-  <property>
-    <name>hadoop.tmp.dir</name>
-    <value>c:\hdp\temp\hadoop</value>
-  </property>
 
-  <property>
-    <name>hadoop.proxyuser.hadoop.hosts</name>
-    <value>192.168.145.128</value>
-  </property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>
-        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
-        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
-        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
-        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
-        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
-        DEFAULT
-    </value>
-<description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
-  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
+  <!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
 
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
+       http://www.apache.org/licenses/LICENSE-2.0
 
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+  -->
 
-RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
-RULE:[2:$1@$0](.*@ACME.ORG)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+  <property>
+    <name>hadoop.tmp.dir</name>
+    <value>c:\hadoop\temp\hadoop</value>
   </property>
 </configuration>
\ No newline at end of file
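
The removed hadoop.security.auth_to_local rules (now inherited from the common HDFS
definition) map Kerberos principals onto local users using the base/filter/substitution
sections described above. A short worked example with a hypothetical NameNode principal:

  principal:       nn/host1.example.com@EXAMPLE.COM   (hypothetical)
  base [2:$1@$0]:  builds the string "nn@EXAMPLE.COM"
  filter:          ([nd]n@.*) matches that string
  substitution:    s/.*/hdfs/ rewrites it to "hdfs"
  result:          the principal is mapped to the local user "hdfs"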

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
index b4b90d7..b5451d8 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
@@ -26,8 +26,127 @@
 
 <configuration>
   <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>c:\hadoop\logs\hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>c:\hadoop\run\hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
     <name>dfs.datanode.data.dir.mount.file</name>
     <value>file:///c:/hadoop/conf/dfs_data_dir_mount.hist</value>
     <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
   </property>
+
+  <property>
+    <name>proxyuser_group</name>
+    <deleted>true</deleted>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <deleted>true</deleted>
+  </property>
+
+  <!-- hadoop-env.cmd -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hadoop-env.cmd file</description>
+    <value>
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem Set Hadoop-specific environment variables here.
+
+@rem The only required environment variable is JAVA_HOME.  All others are
+@rem optional.  When running a distributed configuration it is best to
+@rem set JAVA_HOME in this file, so that it is correctly defined on
+@rem remote nodes.
+
+@rem The java implementation to use.  Required.
+set JAVA_HOME=%JAVA_HOME%
+
+@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
+@rem set JSVC_HOME=%JSVC_HOME%
+
+@rem set HADOOP_CONF_DIR=
+
+@rem Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+if exist %HADOOP_HOME%\contrib\capacity-scheduler (
+if not defined HADOOP_CLASSPATH (
+set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+) else (
+set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+)
+)
+
+@rem If TEZ_CLASSPATH is defined in the env, that means that TEZ is enabled
+@rem append it to the HADOOP_CLASSPATH
+
+if defined TEZ_CLASSPATH (
+if not defined HADOOP_CLASSPATH (
+set HADOOP_CLASSPATH=%TEZ_CLASSPATH%
+) else (
+set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%TEZ_CLASSPATH%
+)
+)
+
+@rem The maximum amount of heap to use, in MB. Default is 1000.
+@rem set HADOOP_HEAPSIZE=
+@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+@rem Extra Java runtime options.  Empty by default.
+@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
+
+@rem Command specific options appended to HADOOP_OPTS when specified
+if not defined HADOOP_SECURITY_LOGGER (
+set HADOOP_SECURITY_LOGGER=INFO,RFAS
+)
+if not defined HDFS_AUDIT_LOGGER (
+set HDFS_AUDIT_LOGGER=INFO,NullAppender
+)
+
+set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
+set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
+set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
+
+@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
+@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
+
+@rem On secure datanodes, user to run the datanode as after dropping privileges
+set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
+
+@rem Where log files are stored.  %HADOOP_HOME%/logs by default.
+@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
+
+@rem Where log files are stored in the secure data environment.
+set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
+
+@rem The directory where pid files are stored. /tmp by default.
+@rem NOTE: this should be set to a directory that can only be written to by
+@rem       the user that will run the hadoop daemons.  Otherwise there is the
+@rem       potential for a symlink attack.
+set HADOOP_PID_DIR=%HADOOP_PID_DIR%
+set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
+
+@rem A string representing this instance of hadoop. %USERNAME% by default.
+set HADOOP_IDENT_STRING=%USERNAME%
+    </value>
+  </property>
 </configuration>
\ No newline at end of file
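
The new hdfs_log_dir_prefix and hadoop_pid_dir_prefix properties are not referenced by
the template above, which leaves HADOOP_LOG_DIR and HADOOP_PID_DIR to be supplied
through the environment. Because the content property is rendered as a Jinja template,
it could also consume them directly; a minimal sketch of that wiring, purely as an
illustration and not part of this commit:

  <property>
    <name>content</name>
    <value>
      ...
      @rem illustrative only: pick up the stack-configured prefixes
      set HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}\%USERNAME%
      set HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}
      ...
    </value>
  </property>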

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6f6e87/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 1549b41..0000000
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,219 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-
- Copyright 2011 The Apache Software Foundation
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true">
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.ha.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HAService protocol used by HAAdmin to manage the
-      active and stand-by states of namenode.</description>
-  </property>
-
-  <property>
-    <name>security.zkfc.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for access to the ZK Failover Controller
-    </description>
-  </property>
-
-  <property>
-    <name>security.qjournal.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for QJournalProtocol, used by the NN to communicate with
-    JNs when using the QuorumJournalManager for edit logs.</description>
-  </property>
-
-  <property>
-    <name>security.mrhs.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HSClientProtocol, used by job clients to
-    communicate with the MR History Server for job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <!-- YARN Protocols -->
-
-  <property>
-    <name>security.resourcetracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceTrackerProtocol, used by the
-    ResourceManager and NodeManager to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.resourcemanager-administration.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceManagerAdministrationProtocol, for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationclient.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationClientProtocol, used by the ResourceManager
-    and applications submission clients to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationmaster.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationMasterProtocol, used by the ResourceManager
-    and ApplicationMasters to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.containermanagement.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager
-    and ApplicationMasters to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.resourcelocalizer.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceLocalizer protocol, used by the NodeManager
-    and ResourceLocalizer to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for MRClientProtocol, used by job clients to
-    communicate with the MR ApplicationMaster to query job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-</configuration>
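
The deleted hadoop-policy.xml is likewise picked up from the common HDFS service
definition. Its descriptions spell out the ACL value format: a comma-separated list of
users, a blank, then a comma-separated list of groups, with "*" meaning all users are
allowed. A hedged sketch of a restricted ACL using hypothetical names:

  <property>
    <name>security.client.protocol.acl</name>
    <value>alice,bob hadoop,admins</value>
    <description>Only alice, bob, and members of the hadoop or admins groups
    may use ClientProtocol (illustrative values).</description>
  </property>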