Posted to commits@ambari.apache.org by ao...@apache.org on 2014/01/31 20:51:04 UTC

[39/51] [partial] AMBARI-4491. Move all the supported versions in Baikal for stack to python code (remove dependence on puppet). (aonishuk)

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py
deleted file mode 100644
index ff6d0ed..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import functions
-
-
-class HbaseServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    output_file = "/apps/hbase/data/ambarismoketest"
-    test_cmd = format("fs -test -e {output_file}")
-    kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
-    hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
-  
-    File( '/tmp/hbaseSmokeVerify.sh',
-      content = StaticFile("hbaseSmokeVerify.sh"),
-      mode = 0755
-    )
-  
-    File( hbase_servicecheck_file,
-      mode = 0755,
-      content = Template('hbase-smoke.sh.j2')
-    )
-    
-    if params.security_enabled:    
-      hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
-      hbase_kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
-      grantprivelegecmd = format("{hbase_kinit_cmd} hbase shell {hbase_grant_premissions_file}")
-  
-      File( hbase_grant_premissions_file,
-        owner   = params.hbase_user,
-        group   = params.user_group,
-        mode    = 0644,
-        content = Template('hbase_grant_permissions.j2')
-      )
-      
-      Execute( grantprivelegecmd,
-        user = params.hbase_user,
-      )
-
-    servicecheckcmd = format("{kinit_cmd} hbase --config {conf_dir} shell {hbase_servicecheck_file}")
-    smokeverifycmd = format("{kinit_cmd} /tmp/hbaseSmokeVerify.sh {conf_dir} {service_check_data}")
-  
-    Execute( servicecheckcmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-  
-    Execute ( smokeverifycmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-    
-def main():
-  import sys
-  command_type = 'perform'
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  HbaseServiceCheck().execute()
-  
-if __name__ == "__main__":
-  HbaseServiceCheck().execute()
-  

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py
deleted file mode 100644
index c9b20ef..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['global']['hbase_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
deleted file mode 100644
index 1c75d15..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
deleted file mode 100644
index e971e13..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8656
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8656
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8656

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2
deleted file mode 100644
index 1c75d15..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2
deleted file mode 100644
index b8505b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6 required.
-export JAVA_HOME={{java64_home}}
-
-# HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{conf_dir}}}
-
-# Extra Java CLASSPATH elements. Optional.
-export HBASE_CLASSPATH=${HBASE_CLASSPATH}
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
-
-# Extra Java runtime options.
-# Below are what we set by default. May only work with SUN JVM.
-# For more on why as well as other possible settings,
-# see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
-export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
-# Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
-
-# Uncomment and adjust to enable JMX exporting
-# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
-# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-#
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
-export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
-export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
-
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
-
-# Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
-
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR={{log_dir}}
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR={{pid_dir}}
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage it's own instance of Zookeeper or not.
-export HBASE_MANAGES_ZK=false
-
-{% if security_enabled %}
-export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2
deleted file mode 100644
index 61fe62f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'ambarismoketest'
-drop 'ambarismoketest'
-create 'ambarismoketest','family'
-put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
-scan 'ambarismoketest'
-exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2
deleted file mode 100644
index 3b3bb18..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_grant_permissions.j2
deleted file mode 100644
index 9102d35..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_grant_permissions.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
-exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_master_jaas.conf.j2
deleted file mode 100644
index 9cf35d3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_master_jaas.conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{master_keytab_path}}"
-principal="{{master_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
deleted file mode 100644
index bd1d727..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{regionserver_keytab_path}}"
-principal="{{regionserver_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/regionservers.j2
deleted file mode 100644
index b22ae5f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/regionservers.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-{% for host in rs_hosts %}{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index d2bff0f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,255 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
-    </description>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directoires then teh edits is
-        replicated in all of the directoires for redundancy.
-        Default value is same as fs.checkpoint.dir
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>67108864</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value></value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-<description>The mapping from kerberos principal names to local OS user names.
-  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
-RULE:[2:$1@$0](.@ACME.ORG)s/@.//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
-DEFAULT
-    </description>
-  </property>
-
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/global.xml
deleted file mode 100644
index 04d51db..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/global.xml
+++ /dev/null
@@ -1,187 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>namenode_host</name>
-    <value></value>
-    <description>NameNode Host.</description>
-  </property>
-  <property>
-    <name>dfs_name_dir</name>
-    <value>/hadoop/hdfs/namenode</value>
-    <description>NameNode Directories.</description>
-  </property>
-  <property>
-    <name>snamenode_host</name>
-    <value></value>
-    <description>Secondary NameNode.</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Secondary NameNode checkpoint dir.</description>
-  </property>
-  <property>
-    <name>datanode_hosts</name>
-    <value></value>
-    <description>List of Datanode Hosts.</description>
-  </property>
-  <property>
-    <name>dfs_data_dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Data directories for Data Nodes.</description>
-  </property>
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>dfs_webhdfs_enabled</name>
-    <value>true</value>
-    <description>WebHDFS enabled</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>datanode_du_reserved</name>
-    <value>1</value>
-    <description>Reserved space for HDFS</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>dfs_datanode_failed_volume_tolerated</name>
-    <value>0</value>
-    <description>DataNode volumes failure toleration</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_period</name>
-    <value>21600</value>
-    <description>HDFS Maximum Checkpoint Delay</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_size</name>
-    <value>0.5</value>
-    <description>FS Checkpoint Size.</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-  <property>
-    <name>dfs_exclude</name>
-    <value></value>
-    <description>HDFS Exclude hosts.</description>
-  </property>
-  <property>
-    <name>dfs_include</name>
-    <value></value>
-    <description>HDFS Include hosts.</description>
-  </property>
-  <property>
-    <name>dfs_replication</name>
-    <value>3</value>
-    <description>Default Block Replication.</description>
-  </property>
-  <property>
-    <name>dfs_block_local_path_access_user</name>
-    <value>hbase</value>
-    <description>Default Block Replication.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_address</name>
-    <value>50010</value>
-    <description>Port for datanode address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_http_address</name>
-    <value>50075</value>
-    <description>Port for datanode address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir_perm</name>
-    <value>750</value>
-    <description>Datanode dir perms.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-  <property>
-    <name>kadmin_pw</name>
-    <value></value>
-    <description>Kerberos realm admin password</description>
-  </property>
-  <property>
-    <name>keytab_path</name>
-    <value>/etc/security/keytabs</value>
-    <description>Kerberos keytab path.</description>
-  </property>
-
-    <property>
-    <name>namenode_formatted_mark_dir</name>
-    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
-    <description>Formatteed Mark Directory.</description>
-  </property>
-    <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>User and Groups.</description>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communciate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value></value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-log4j.xml
deleted file mode 100644
index 4fd0b22..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-log4j.xml
+++ /dev/null
@@ -1,305 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hadoop.root.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>hadoop.log.dir</name>
-    <value>.</value>
-  </property>
-  <property>
-    <name>hadoop.log.file</name>
-    <value>hadoop.log</value>
-  </property>
-  <property>
-    <name>log4j.rootLogger</name>
-    <value>${hadoop.root.logger}, EventCounter</value>
-  </property>
-  <property>
-    <name>log4j.threshhold</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.File</name>
-    <value>${hadoop.log.dir}/${hadoop.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.console</name>
-    <value>org.apache.log4j.ConsoleAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.target</name>
-    <value>System.err</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.console.layout.ConversionPattern</name>
-    <value>%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.taskid</name>
-    <value>null</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.iscleanup</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.noKeepSplits</name>
-    <value>4</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.totalLogFileSize</name>
-    <value>100</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.purgeLogSplits</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hadoop.tasklog.logsRetainHours</name>
-    <value>12</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA</name>
-    <value>org.apache.hadoop.mapred.TaskLogAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.taskId</name>
-    <value>${hadoop.tasklog.taskid}</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.isCleanup</name>
-    <value>${hadoop.tasklog.iscleanup}</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.totalLogFileSize</name>
-    <value>${hadoop.tasklog.totalLogFileSize}</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.TLA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>hadoop.security.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>hadoop.security.log.maxfilesize</name>
-    <value>256MB</value>
-  </property>
-  <property>
-    <name>hadoop.security.log.maxbackupindex</name>
-    <value>20</value>
-  </property>
-  <property>
-    <name>log4j.category.SecurityLogger</name>
-    <value>${hadoop.security.logger}</value>
-  </property>
-  <property>
-    <name>hadoop.security.log.file</name>
-    <value>SecurityAuth.audit</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS.File</name>
-    <value>${hadoop.log.dir}/${hadoop.security.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAS.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.File</name>
-    <value>${hadoop.log.dir}/${hadoop.security.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.MaxFileSize</name>
-    <value>${hadoop.security.log.maxfilesize}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFAS.MaxBackupIndex</name>
-    <value>${hadoop.security.log.maxbackupindex}</value>
-  </property>
-  <property>
-    <name>hdfs.audit.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit</name>
-    <value>${hdfs.audit.logger}</value>
-  </property>
-  <property>
-    <name>log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT.File</name>
-    <value>${hadoop.log.dir}/hdfs-audit.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.DRFAAUDIT.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>mapred.audit.logger</name>
-    <value>INFO,console</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.mapred.AuditLogger</name>
-    <value>${mapred.audit.logger}</value>
-  </property>
-  <property>
-    <name>log4j.additivity.org.apache.hadoop.mapred.AuditLogger</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT</name>
-    <value>org.apache.log4j.DailyRollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT.File</name>
-    <value>${hadoop.log.dir}/mapred-audit.log</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %p %c{2}: %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.MRAUDIT.DatePattern</name>
-    <value>.yyyy-MM-dd</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA</name>
-    <value>org.apache.log4j.RollingFileAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.File</name>
-    <value>${hadoop.log.dir}/${hadoop.log.file}</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.MaxFileSize</name>
-    <value>256MB</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.MaxBackupIndex</name>
-    <value>10</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout</name>
-    <value>org.apache.log4j.PatternLayout</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p %c{2} - %m%n</value>
-  </property>
-  <property>
-    <name>log4j.appender.RFA.layout.ConversionPattern</name>
-    <value>%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n</value>
-  </property>
-  <property>
-    <name>hadoop.metrics.log.level</name>
-    <value>INFO</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.apache.hadoop.metrics2</name>
-    <value>${hadoop.metrics.log.level}</value>
-  </property>
-  <property>
-    <name>log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service</name>
-    <value>ERROR</value>
-  </property>
-  <property>
-    <name>log4j.appender.NullAppender</name>
-    <value>org.apache.log4j.varia.NullAppender</value>
-  </property>
-  <property>
-    <name>log4j.appender.EventCounter</name>
-    <value>org.apache.hadoop.log.metrics.EventCounter</value>
-  </property>
-
-</configuration>
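
For context, the hdfs-log4j entries removed above are plain name/value pairs that end up as lines in log4j.properties on the hosts. A minimal sketch of flattening such a <configuration> file (function name and paths are illustrative, not part of the Ambari code):

    import xml.etree.ElementTree as ET

    def render_log4j_properties(xml_path, out_path):
        # Each <property> holds a <name>/<value> pair; write them out as key=value.
        root = ET.parse(xml_path).getroot()
        with open(out_path, "w") as out:
            for prop in root.findall("property"):
                name = prop.findtext("name")
                value = prop.findtext("value") or ""
                if name:
                    out.write("%s=%s\n" % (name, value))

    # e.g. render_log4j_properties("hdfs-log4j.xml", "/etc/hadoop/conf/log4j.properties")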

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 1fc6c59..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,476 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks a DataNode would tolerate</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-      circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximum block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for balancing purposes, in terms of
-      the number of bytes per second.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50070</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.port</name>
-    <value>50010</value>
-    <description>
-      The datanode port for data transfer. This property is effective only if referenced from dfs.datanode.address property.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
-    <description>
-      The datanode server address and port for data transfer.
-    </description>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.http.port</name>
-    <value>50075</value>
-    <description>
-      The datanode http port. This property is effective only if referenced from dfs.datanode.http.address property.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
-    <description>
-      The datanode http server address and port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value>localhost:50070</value>
-    <description>The address and the base port on which the dfs namenode
-      web UI will listen.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.pct</name>
-    <value>0.85f</value>
-    <description>When calculating remaining space, only use this percentage of the real available space.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.xcievers</name>
-    <value>4096</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>dfs.umaskmode</name>
-    <value>077</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.ugi</name>
-    <!-- cluster variant -->
-    <value>gopher,gopher</value>
-    <description>The user account used by the web interface.
-      Syntax: USERNAME,GROUP1,GROUP2, ...
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.supergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow Queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>ipc.server.max.response.size</name>
-    <value>5242880</value>
-  </property>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the secondary namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value>localhost:50470</value>
-    <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be set on dfs.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.access.time.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL for the users and groups that can view the default servlets in HDFS</description>
-  </property>
-
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks datanode would tolerate</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes is greater
-      than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>A DataNode is marked stale after not sending a heartbeat within this interval, in milliseconds</description>
-  </property>
-
-</configuration>
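
Several of the values above reference other properties, e.g. dfs.datanode.address is 0.0.0.0:${ambari.dfs.datanode.port}. A rough sketch (helper name and the fixed iteration cap are assumptions, not Ambari API) of loading such a *-site.xml and resolving those references:

    import re
    import xml.etree.ElementTree as ET

    def load_site_xml(path):
        props = {}
        for prop in ET.parse(path).getroot().findall("property"):
            props[prop.findtext("name")] = prop.findtext("value") or ""
        # Substitute ${other.property} references; a few passes handle nesting.
        pattern = re.compile(r"\$\{([^}]+)\}")
        for _ in range(5):
            resolved = {k: pattern.sub(lambda m: props.get(m.group(1), m.group(0)), v)
                        for k, v in props.items()}
            if resolved == props:
                break
            props = resolved
        return props

    # load_site_xml("hdfs-site.xml")["dfs.datanode.address"]  ->  "0.0.0.0:50010"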

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml
deleted file mode 100644
index d29d2fc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,147 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HDFS</name>
-      <comment>Apache Hadoop Distributed File System</comment>
-      <version>1.2.0.1.3.3.0</version>
-
-      <components>
-        <component>
-          <name>NAMENODE</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/namenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>DATANODE</name>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/datanode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>SECONDARY_NAMENODE</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/snamenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HDFS_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/hdfs_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>lzo</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-libhdfs</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-native</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-pipes</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-sbin</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-lzo</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-lzo-native</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>snappy</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>snappy-devel</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ambari-log4j</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-policy</config-type>
-        <config-type>hdfs-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
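
The metainfo.xml removed above is what tells Ambari which Python command script backs each component. A small illustrative reader for this schemaVersion 2.0 layout (not shipped with Ambari):

    import xml.etree.ElementTree as ET

    def list_components(metainfo_path):
        root = ET.parse(metainfo_path).getroot()
        for component in root.iter("component"):
            print("%-20s %-7s %s" % (component.findtext("name"),
                                     component.findtext("category"),
                                     component.findtext("commandScript/script")))

    # list_components("metainfo.xml")
    # NAMENODE             MASTER  scripts/namenode.py
    # DATANODE             SLAVE   scripts/datanode.py
    # ...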

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
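
checkForFormat.sh only formats the NameNode when no marker directory exists and every configured name dir is empty. A Python counterpart of that check (illustrative only; the stack keeps the shell script, and migration of the old marker file is omitted here):

    import os

    def namenode_needs_format(mark_dir, name_dirs):
        if os.path.isdir(mark_dir):
            return False  # marker present: DFS already formatted on this host
        non_empty = [d for d in name_dirs if os.path.isdir(d) and os.listdir(d)]
        if non_empty:
            raise RuntimeError("Non-empty NameNode dirs, refusing to format: %s" % non_empty)
        return True

    # if namenode_needs_format("/var/run/hadoop/hdfs/namenode-formatted",
    #                          ["/hadoop/hdfs/namenode"]):
    #     run "hadoop --config <conf_dir> namenode -format" as the hdfs user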

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkWebUI.py
deleted file mode 100644
index f8e9c1a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkWebUI.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import httplib
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
-  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
-
-  (options, args) = parser.parse_args()
-  
-  hosts = options.hosts.split(',')
-  port = options.port
-
-  for host in hosts:
-    try:
-      conn = httplib.HTTPConnection(host, port)
-      # This can be modified to get a partial url part to be sent with request
-      conn.request("GET", "/")
-      httpCode = conn.getresponse().status
-      conn.close()
-    except Exception:
-      httpCode = 404
-
-    if httpCode != 200:
-      print "Cannot access WEB UI on: http://" + host + ":" + port
-      exit(1)
-      
-
-if __name__ == "__main__":
-  main()
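
checkWebUI.py expects a comma-separated host list and a single port, and exits non-zero as soon as one host fails to answer HTTP 200 on "/". A hypothetical invocation (host names and port are placeholders):

    python checkWebUI.py -m host1.example.com,host2.example.com -p 50070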

http://git-wip-us.apache.org/repos/asf/ambari/blob/43f14b34/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/datanode.py
deleted file mode 100644
index eaa27cf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/datanode.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_datanode import datanode
-
-
-class DataNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    datanode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    datanode(action="stop")
-
-  def config(self, env):
-    import params
-
-    datanode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.datanode_pid_file)
-
-
-if __name__ == "__main__":
-  DataNode().execute()
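
datanode.py delegates all real work to hdfs_datanode.datanode(), which lives next to it in the same package (not shown in this hunk). Purely as an illustration of the pattern -- the resource calls and parameter names below are assumptions, not the actual module -- the helper follows roughly this shape:

    from resource_management import *

    def datanode(action=None):
      import params

      if action == "configure":
        # Lay down the data directory with the configured owner/permissions.
        Directory(params.dfs_data_dir,        # assumed param name
                  owner=params.hdfs_user,
                  mode=0750,
                  recursive=True)
      elif action in ("start", "stop"):
        # Drive the daemon through hadoop-daemon.sh as the hdfs user.
        cmd = ("%s/hadoop-daemon.sh --config %s %s datanode"
               % (params.hadoop_bin_dir, params.hadoop_conf_dir, action))  # assumed params
        Execute(cmd, user=params.hdfs_user)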