Posted to commits@ambari.apache.org by yu...@apache.org on 2014/11/14 03:19:57 UTC

[15/29] ambari git commit: AMBARI-8269. Merge branch-windows-dev changes to trunk. (Jayush Luniya via yusaku)

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/files/draining_servers.rb
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/files/draining_servers.rb b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/files/draining_servers.rb
new file mode 100644
index 0000000..a3958a6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/files/draining_servers.rb
@@ -0,0 +1,164 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add or remove servers from draining mode via zookeeper
+
+require 'optparse'
+include Java
+
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.client.HBaseAdmin
+import org.apache.hadoop.hbase.zookeeper.ZKUtil
+import org.apache.commons.logging.Log
+import org.apache.commons.logging.LogFactory
+
+# Name of this script
+NAME = "draining_servers"
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
+  opts.separator 'Add, remove, or list servers in draining mode. Accepts either a hostname (to drain all region servers ' +
+                 'on that host), a host:port pair, or a host,port,startCode triplet. More than one server can be given, separated by spaces.'
+  opts.on('-h', '--help', 'Display usage information') do
+    puts opts
+    exit
+  end
+  options[:debug] = false
+  opts.on('-d', '--debug', 'Display extra debug logging') do
+    options[:debug] = true
+  end
+end
+optparse.parse!
+
+# Return an array of server names, where each server name is the
+# comma-delimited hostname,port,startcode triplet
+def getServers(admin)
+  serverInfos = admin.getClusterStatus().getServerInfo()
+  servers = []
+  for server in serverInfos
+    servers << server.getServerName()
+  end
+  return servers
+end
+
+def getServerNames(hostOrServers, config)
+  ret = []
+
+  for hostOrServer in hostOrServers
+    # check whether it is already serverName. No need to connect to cluster
+    parts = hostOrServer.split(',')
+    if parts.size() == 3
+      ret << hostOrServer
+    else
+      admin = HBaseAdmin.new(config) if not admin
+      servers = getServers(admin)
+
+      hostOrServer = hostOrServer.gsub(/:/, ",")
+      for server in servers
+        ret << server if server.start_with?(hostOrServer)
+      end
+    end
+  end
+
+  admin.close() if admin
+  return ret
+end
+
+def addServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.createAndFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+def removeServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.deleteNodeFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+# list servers in draining mode
+def listServers(options)
+  config = HBaseConfiguration.create()
+
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
+  servers.each {|server| puts server}
+end
+
+hostOrServers = ARGV[1..ARGV.size()]
+
+# Create a logger and disable the annoying DEBUG-level client logging
+def configureLogging(options)
+  apacheLogger = LogFactory.getLog(NAME)
+  # Configure log4j to not spew so much
+  unless (options[:debug])
+    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+    logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+  end
+  return apacheLogger
+end
+
+# Create a logger and save it to ruby global
+$LOG = configureLogging(options)
+case ARGV[0]
+  when 'add'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    addServers(options, hostOrServers)
+  when 'remove'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    removeServers(options, hostOrServers)
+  when 'list'
+    listServers(options)
+  else
+    puts optparse
+    exit 3
+end

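For context on the script above: it is driven entirely from the command line, per its usage banner. A hypothetical session on an HDPWIN host (host names, ports, and the start code below are placeholders, not values from this commit) might look like:

    cd %HBASE_HOME%\bin
    hbase.cmd org.jruby.Main draining_servers.rb add dn1.example.com dn2.example.com:60020
    hbase.cmd org.jruby.Main draining_servers.rb list
    hbase.cmd org.jruby.Main draining_servers.rb remove dn1.example.com,60020,1415913600000

A bare hostname drains every region server on that host (the script looks the servers up via HBaseAdmin), a host:port pair narrows it to a single server, and a host,port,startCode triplet is used as-is without contacting the cluster.
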
http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase.py
new file mode 100644
index 0000000..a427fb3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def hbase():
+  import params
+  XmlConfig("hbase-site.xml",
+             conf_dir = params.hbase_conf_dir,
+             configurations = params.config['configurations']['hbase-site']
+  )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_client.py
new file mode 100644
index 0000000..bf2aad0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_client.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+class HbaseClient(Script):
+  def install(self, env):
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_decommission.py
new file mode 100644
index 0000000..f2a2ad6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_decommission.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hbase_decommission():
+  import params
+
+  File(params.region_drainer,
+       content=StaticFile("draining_servers.rb"),
+       owner=params.hbase_user,
+       mode="f"
+  )
+  if params.hbase_drain_only == True:
+    hosts = params.hbase_excluded_hosts.split(",")
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "cmd /c {hbase_executable} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+        pass
+    pass
+
+  else:
+
+    hosts = params.hbase_excluded_hosts.split(",")
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "cmd /c {hbase_executable} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format(
+          "cmd /c {hbase_executable} org.jruby.Main {region_mover} unload {host}")
+
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+      pass
+    pass
+  pass
\ No newline at end of file

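To make the Execute calls above concrete: format() fills {hbase_executable}, {region_drainer} and {region_mover} from params.py (added later in this commit), which derives them from HBASE_HOME. Assuming a hypothetical HBASE_HOME of C:\hdp\hbase and a single excluded host dn1.example.com, the default (non drain-only) branch would run roughly:

    cmd /c C:\hdp\hbase\bin\hbase.cmd org.jruby.Main C:\hdp\hbase\bin\draining_servers.rb add dn1.example.com
    cmd /c C:\hdp\hbase\bin\hbase.cmd org.jruby.Main C:\hdp\hbase\bin\region_mover.rb unload dn1.example.com

whereas the mark_draining_only branch issues only the "remove" form of the draining_servers.rb command for each excluded host.
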
http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_master.py
new file mode 100644
index 0000000..d3720d5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_master.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hbase import hbase
+import service_mapping
+from hbase_decommission import *
+
+class HbaseMaster(Script):
+  def install(self, env):
+    if not check_windows_service_exists(service_mapping.hbase_master_win_service_name):
+      self.install_packages(env)
+
+  def configure(self, env):
+    hbase()
+
+  def start(self, env):
+    import params
+    self.configure(env) # for security
+    Service(service_mapping.hbase_master_win_service_name, action="start")
+
+  def stop(self, env):
+    Service(service_mapping.hbase_master_win_service_name, action="stop")
+
+  def status(self, env):
+    check_windows_service_status(service_mapping.hbase_master_win_service_name)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+    hbase_decommission()
+
+if __name__ == "__main__":
+  HbaseMaster().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..d69d402
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/hbase_regionserver.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hbase import hbase
+import service_mapping
+
+class HbaseRegionServer(Script):
+  def install(self, env):
+    if not check_windows_service_exists(service_mapping.hbase_regionserver_win_service_name):
+      self.install_packages(env)
+
+  def configure(self, env):
+    hbase()
+
+  def start(self, env):
+    import params
+    Service(service_mapping.hbase_regionserver_win_service_name, action="start")
+
+  def stop(self, env):
+    Service(service_mapping.hbase_regionserver_win_service_name, action="stop")
+
+  def status(self, env):
+    check_windows_service_status(service_mapping.hbase_regionserver_win_service_name)
+
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/params.py
new file mode 100644
index 0000000..cd9a74a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/params.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+
+# server configurations
+config = Script.get_config()
+hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
+hbase_bin_dir = os.path.join(os.environ["HBASE_HOME"],'bin')
+hbase_executable = os.path.join(hbase_bin_dir,"hbase.cmd")
+hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+hbase_user = "hadoop"
+
+#decomm params
+region_drainer = os.path.join(hbase_bin_dir,"draining_servers.rb")
+region_mover = os.path.join(hbase_bin_dir,"region_mover.rb")
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = config['commandParams']['mark_draining_only']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_check.py
new file mode 100644
index 0000000..c12967b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_check.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+from resource_management.libraries.functions.format import format
+
+
+class HbaseServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+    service = "HBASE"
+    Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
+
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()

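For reference, params.hdp_root is computed one level above HADOOP_HOME, so the service check above shells out to the bundled smoke-test driver. With a hypothetical HADOOP_HOME of C:\hdp\hadoop, the command expands to roughly:

    cmd /C C:\hdp\Run-SmokeTests.cmd HBASE
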
http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_mapping.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_mapping.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_mapping.py
new file mode 100644
index 0000000..186b64e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HBASE/package/scripts/service_mapping.py
@@ -0,0 +1,21 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+hbase_master_win_service_name = "master"
+hbase_regionserver_win_service_name = "regionserver"

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
new file mode 100644
index 0000000..2dbda14
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,202 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+  <!-- i/o properties -->
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>131072</value>
+    <description>The size of buffer for use in sequence files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+  </property>
+  <property>
+    <name>io.serializations</name>
+    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+  </property>
+  <property>
+    <name>io.compression.codecs</name>
+    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
+    <description>A list of the compression codec classes that can be used
+                 for compression/decompression.</description>
+  </property>
+  <!-- file system properties -->
+  <property>
+    <name>fs.defaultFS</name>
+    <value>hdfs://localhost:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes after which the checkpoint
+      gets deleted.  If zero, the trash feature is disabled.
+    </description>
+  </property>
+  <property>
+    <name>fs.trash.checkpoint.interval</name>
+    <value>0</value>
+    <description>Number of minutes between trash checkpoints.
+      Should be smaller or equal to fs.trash.interval. If zero,
+      the value is set to the value of fs.trash.interval.
+    </description>
+  </property>
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>022</value>
+    <description>The umask used when creating files and directories.
+      Can be in octal or in symbolic. Examples are: "022" (octal for
+      u=rwx,g=r-x,o=r-x in symbolic), or "u=rwx,g=rwx,o=" (symbolic
+      for 007 in octal).</description>
+  </property>
+  <property>
+    <name>ipc.client.idlethreshold</name>
+    <value>8000</value>
+    <description>Defines the threshold number of connections after which
+               connections will be inspected for idleness.
+    </description>
+  </property>
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>30000</value>
+    <description>The maximum time after which a client will bring down the
+               connection to the server.
+    </description>
+  </property>
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>50</value>
+    <description>Defines the maximum number of retries for IPC connections.</description>
+  </property>
+  <!-- Web Interface Configuration -->
+  <property>
+    <name>hadoop.http.staticuser.user</name>
+    <value>gopher</value>
+    <description>
+      The user name to filter as, on static web filters
+      while rendering content. An example use is the HDFS
+      web UI (user to be used for browsing files).
+    </description>
+  </property>
+  <property>
+    <name>webinterface.private.actions</name>
+    <value>false</value>
+    <description> If set to true, the web interfaces of RM and NN may contain
+                actions, such as kill job, delete file, etc., that should
+                not be exposed to the public. Enable this option if the interfaces
+                are only reachable by those who have the right authorization.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>simple</value>
+    <description>
+      Set the authentication for the cluster. Valid values are: simple or
+      kerberos.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>false</value>
+    <description>
+     Enable authorization for different protocols.
+  </description>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.hadoop.groups</name>
+    <value>HadoopUsers</value>
+    <description>
+     Proxy group for Hadoop.
+  </description>
+  </property>
+  <property>
+    <name>hadoop.ssl.enabled</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hadoop.ssl.require.client.cert</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hadoop.ssl.hostname.verifier</name>
+    <value>DEFAULT</value>
+  </property>
+  <property>
+    <name>hadoop.ssl.keystores.factory.class</name>
+    <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
+  </property>
+  <property>
+    <name>hadoop.ssl.server.conf</name>
+    <value>ssl-server.xml</value>
+  </property>
+  <property>
+    <name>hadoop.ssl.client.conf</name>
+    <value>ssl-client.xml</value>
+  </property>
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value>authentication</value>
+  </property>
+  <property>
+    <name>hadoop.tmp.dir</name>
+    <value>c:\hdp\temp\hadoop</value>
+  </property>
+
+  <property>
+    <name>hadoop.proxyuser.hadoop.hosts</name>
+    <value>192.168.145.128</value>
+  </property>
+
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>
+        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
+        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
+        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
+        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
+        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
+        DEFAULT
+    </value>
+<description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
+  The default rule is just "DEFAULT", which maps all principals in your default domain to their first component:
+  both "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" become "omalley" if your default domain is APACHE.ORG.
+The translations rules have 3 sections:
+      base     filter    substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+    </description>
+  </property>
+</configuration>
\ No newline at end of file

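As a worked example of the auth_to_local rules above (realm and host names are hypothetical): for the two-component principal rm/rmhost@EXAMPLE.COM, the rule

    RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/

first builds "rm@EXAMPLE.COM" from $1 and $0, the filter ([rn]m@.*) matches it, and the substitution rewrites the whole string to "yarn". In the same way nn/... and dn/... principals map to "hdfs", hm/... and rs/... to "hbase", and jhs/... to "mapred"; anything that matches no rule falls through to DEFAULT, which takes the first component of principals in the default realm.
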
http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..85ca907
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+
+ Copyright 2011 The Apache Software Foundation
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>dfs.datanode.data.dir.mount.file</name>
+    <value>c:\hadoop\hdfs\dfs_data_dir_mount.hist</value>
+    <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.mount.file</name>
+    <value>file:///c:/hadoop/conf/dfs_data_dir_mount.hist</value>
+    <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
+  </property>
+
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml
new file mode 100644
index 0000000..1549b41
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-policy.xml
@@ -0,0 +1,219 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+
+ Copyright 2011 The Apache Software Foundation
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.ha.service.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HAService protocol used by HAAdmin to manage the
+      active and stand-by states of namenode.</description>
+  </property>
+
+  <property>
+    <name>security.zkfc.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for access to the ZK Failover Controller
+    </description>
+  </property>
+
+  <property>
+    <name>security.qjournal.service.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for QJournalProtocol, used by the NN to communicate with
+    JNs when using the QuorumJournalManager for edit logs.</description>
+  </property>
+
+  <property>
+    <name>security.mrhs.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HSClientProtocol, used by job clients to
+    communicate with the MR History Server to query job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <!-- YARN Protocols -->
+
+  <property>
+    <name>security.resourcetracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceTrackerProtocol, used by the
+    ResourceManager and NodeManager to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.resourcemanager-administration.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceManagerAdministrationProtocol, for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationclient.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationClientProtocol, used by the ResourceManager
+    and applications submission clients to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationmaster.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationMasterProtocol, used by the ResourceManager
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.containermanagement.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.resourcelocalizer.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceLocalizer protocol, used by the NodeManager
+    and ResourceLocalizer to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for MRClientProtocol, used by job clients to
+    communicate with the MR ApplicationMaster to query job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..f267e51
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,272 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true">
+  <!-- file system properties -->
+  <property>
+    <name>dfs.namenode.name.dir</name>
+    <value>file:///c:/hdpdata/hdfs/nn</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>true</value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>Number of failed disks a DataNode will tolerate.</description>
+    <final>true</final>
+  </property>
+  <property>
+    <name>dfs.datanode.data.dir</name>
+    <value>file:///c:/hdpdata/hdfs/dn</value>
+    <description>Determines where on the local filesystem an DFS data node
+    should store its blocks.  If this is a comma-delimited
+    list of directories, then data will be stored in all named
+    directories, typically on different devices.
+    Directories that do not exist are ignored.
+    </description>
+    <final>true</final>
+  </property>
+  <property>
+    <name>dfs.checksum.type</name>
+    <value>CRC32</value>
+    <description>The checksum method to be used by default. To maintain
+    compatibility, it is being set to CRC32. Once all migration steps
+    are complete, we can change it to CRC32C and take advantage of the
+    additional performance benefit.</description>
+  </property>
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+  <property>
+    <name>dfs.namenode.safemode.threshold-pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+  <property>
+    <name>dfs.datanode.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose in terms of
+        the number of bytes per second.
+  </description>
+  </property>
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+  <property>
+    <name>dfs.datanode.https.address</name>
+    <value>0.0.0.0:50076</value>
+  </property>
+  <property>
+    <name>dfs.blocksize</name>
+    <value>134217728</value>
+    <description>The default block size for new files, in bytes.
+      You can use the following suffix (case insensitive): k(kilo),
+      m(mega), g(giga), t(tera), p(peta), e(exa) to specify the
+      size (such as 128k, 512m, 1g, etc.), or provide the complete size
+      in bytes (such as 134217728 for 128 MB).</description>
+  </property>
+  <property>
+    <name>dfs.namenode.http-address</name>
+    <value>localhost:50070</value>
+    <description>The address and the base port where the dfs namenode
+      web ui will listen on. If the port is 0 then the server will
+      start on a free port.</description>
+    <final>true</final>
+  </property>
+  <property>
+    <name>dfs.https.port</name>
+    <value>50070</value>
+    <final>true</final>
+  </property>
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+    </description>
+  </property>
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
+  <property>
+    <name>dfs.datanode.du.pct</name>
+    <value>0.85f</value>
+    <description>When calculating remaining space, only use this percentage of the real available space
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>40</value>
+    <description>The number of server threads for the namenode.</description>
+  </property>
+  <property>
+    <name>dfs.namenode.checkpoint.dir</name>
+    <value>file:///c:/hdpdata/hdfs/snn</value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary images to merge.
+        If this is a comma-delimited list of directories then the image is
+        replicated in all of the directories for redundancy.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.checkpoint.edits.dir</name>
+    <value>file:///c:/hadoop/hdfs/namesecondary</value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary edits to merge.
+        If this is a comma-delimited list of directories then the edits are
+        replicated in all of the directories for redundancy.
+        Default value is same as dfs.namenode.checkpoint.dir
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.checkpoint.period</name>
+    <value>86400</value>
+    <description>The number of seconds between two periodic checkpoints.
+    </description>
+  </property>
+  <property>
+    <name>dfs.datanode.max.transfer.threads</name>
+    <value>1024</value>
+    <description>Specifies the maximum number of threads to use for
+      transferring data in and out of the DN.</description>
+  </property>
+  <!-- Permissions configuration -->
+  <property>
+    <name>dfs.permissions.enabled</name>
+    <value>true</value>
+    <description>
+        If "true", enable permission checking in HDFS.
+        If "false", permission checking is turned off,
+        but all other behavior is unchanged.
+        Switching from one parameter value to the other does not change the mode,
+        owner or group of files or directories.
+    </description>
+  </property>
+  <property>
+    <name>dfs.permissions.superusergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+  <property>
+    <name>ipc.server.max.response.size</name>
+    <value>5242880</value>
+  </property>
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>false</value>
+    <description>
+        If "true", access tokens are used as capabilities for accessing datanodes.
+        If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.secondary.http-address</name>
+    <value>localhost:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50091</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+  <property>
+    <name>dfs.namenode.https-address</name>
+    <value>localhost:50701</value>
+    <description>The https address where namenode binds</description>
+  </property>
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>755</value>
+    <description>The permissions that should be there on dfs.data.dir
+        directories. The datanode will not come up if the permissions are
+        different on existing dfs.data.dir directories. If the directories
+        don't exist, they will be created with this permission.</description>
+  </property>
+  <property>
+    <name>dfs.namenode.accesstime.precision</name>
+    <value>0</value>
+    <description>The access time for an HDFS file is precise up to this value.
+               The default value is 1 hour. Setting a value of 0 disables
+               access times for HDFS.
+    </description>
+  </property>
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value>hdfs</value>
+    <description>ACL of users and groups that can view the default servlets in HDFS</description>
+  </property>
+  <property>
+    <name>ipc.server.read.threadpool.size</name>
+    <value>5</value>
+    <description />
+  </property>
+  <property>
+    <name>dfs.encrypt.data.transfer</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>dfs.encrypt.data.transfer.algorithm</name>
+    <value>3des</value>
+  </property>
+  <property>
+    <name>dfs.https.enable</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>dfs.replication</name>
+    <value>1</value>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>c:\hdp\hadoop\etc\hadoop\dfs.exclude</value>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>c:\hdp\hadoop\etc\hadoop\dfs.include</value>
+  </property>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/8de3425f/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..127e205
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/metainfo.xml
@@ -0,0 +1,162 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <displayName>HDFS</displayName>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.1.0.2.0</version>
+
+      <components>
+        <component>
+          <name>NAMENODE</name>
+          <displayName>NameNode</displayName>
+          <category>MASTER</category>
+          <cardinality>1-2</cardinality>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>REBALANCEHDFS</name>
+              <background>true</background>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>DATANODE</name>
+          <displayName>DataNode</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>SECONDARY_NAMENODE</name>
+          <displayName>SNameNode</displayName>
+          <!-- TODO:  cardinality is conditional on HA usage -->
+          <cardinality>1</cardinality>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HDFS_CLIENT</name>
+          <displayName>HDFS Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hdfs-site.xml</fileName>
+              <dictionaryName>hdfs-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-env.cmds</fileName>
+              <dictionaryName>hadoop-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+
+        <component>
+          <name>JOURNALNODE</name>
+          <displayName>JournalNode</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/journalnode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>ZKFC</name>
+          <displayName>ZKFailoverController</displayName>
+          <category>SLAVE</category>
+          <!-- TODO: cardinality is conditional on HA topology -->
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/zkfc_slave.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>