You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by ma...@apache.org on 2012/11/07 09:13:24 UTC
svn commit: r1406489 [18/19] - in /incubator/ambari/branches/AMBARI-666: ./
ambari-agent/ ambari-agent/conf/ ambari-agent/conf/unix/
ambari-agent/src/main/puppet/manifestloader/
ambari-agent/src/main/puppet/modules/configgenerator/manifests/ ambari-age...
Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hdfs-site.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hdfs-site.xml?rev=1406489&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hdfs-site.xml (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/hdfs-site.xml Wed Nov 7 08:13:12 2012
@@ -0,0 +1,403 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+ <property>
+ <name>dfs.name.dir</name>
+ <!-- cluster variant -->
+ <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+ <description>Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy. </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.support.append</name>
+ <value>true</value>
+ <description>to enable dfs append</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>false</value>
+ <description>to enable webhdfs</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.datanode.failed.volumes.tolerated</name>
+ <value>0</value>
+ <description>#of failed disks dn would tolerate</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.block.local-path-access.user</name>
+ <value>hbase</value>
+ <description>the user who is allowed to perform short
+ circuit reads.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.data.dir</name>
+ <value>/mnt/hmc/hadoop/hdfs/data</value>
+ <description>Determines where on the local filesystem a DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.hosts.exclude</name>
+ <value>/etc/hadoop/conf/dfs.exclude</value>
+ <description>Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.</description>
+ </property>
+
+ <property>
+ <name>dfs.hosts</name>
+ <value>/etc/hadoop/conf/dfs.include</value>
+ <description>Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.</description>
+ </property>
+
+ <property>
+ <name>dfs.replication.max</name>
+ <value>50</value>
+ <description>Maximal block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.replication</name>
+ <value>3</value>
+ <description>Default block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.heartbeat.interval</name>
+ <value>3</value>
+ <description>Determines datanode heartbeat interval in seconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.safemode.threshold.pct</name>
+ <value>1.0f</value>
+ <description>
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>6250000</value>
+ <description>
+ Specifies the maximum amount of bandwidth that each datanode
+ can utilize for the balancing purpose in terms of
+ the number of bytes per second.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.address</name>
+ <value>0.0.0.0:50010</value>
+ </property>
+
+ <property>
+ <name>dfs.datanode.http.address</name>
+ <value>0.0.0.0:50075</value>
+ </property>
+
+ <property>
+ <name>dfs.block.size</name>
+ <value>134217728</value>
+ <description>The default block size for new files.</description>
+ </property>
+
+ <property>
+ <name>dfs.http.address</name>
+ <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The name of the default file system. Either the
+literal string "local" or a host:port for NDFS.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.datanode.du.pct</name>
+<value>0.85f</value>
+<description>When calculating remaining space, only use this percentage of the real available space
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+ <description>
+ Kerberos principal name for the secondary NameNode.
+ </description>
+ </property>
+
+
+<!--
+ This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+ <property>
+ <name>dfs.namenode.kerberos.https.principal</name>
+ <value>host/_HOST@</value>
+ <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.kerberos.https.principal</name>
+ <value>host/_HOST@</value>
+ <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <!-- cluster variant -->
+ <name>dfs.secondary.http.address</name>
+ <value>hdp2.cybervisiontech.com.ua:50090</value>
+ <description>Address of secondary namenode web server</description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.https.port</name>
+ <value>50490</value>
+ <description>The https port where secondary-namenode binds</description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.principal</name>
+ <value>HTTP/_HOST@</value>
+ <description>
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+ HTTP SPNEGO specification.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.keytab</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.kerberos.principal</name>
+ <value>dn/_HOST@</value>
+ <description>
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.keytab.file</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.keytab.file</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.keytab.file</name>
+ <value>/dn.service.keytab</value>
+ <description>
+ The filename of the keytab file for the DataNode.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.https.port</name>
+ <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.https.address</name>
+ <value>hdp1.cybervisiontech.com.ua:50470</value>
+ <description>The https address where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.datanode.data.dir.perm</name>
+ <value>750</value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+ </property>
+
+ <property>
+ <name>dfs.access.time.precision</name>
+ <value>0</value>
+ <description>The access time for HDFS file is precise up to this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+ </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+</property>
+
+<property>
+ <name>ipc.server.read.threadpool.size</name>
+ <value>5</value>
+ <description></description>
+</property>
+
+</configuration>
Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml?rev=1406489&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/configuration/mapred-site.xml Wed Nov 7 08:13:12 2012
@@ -0,0 +1,407 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+ <property>
+ <name>dfs.name.dir</name>
+ <!-- cluster variant -->
+ <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+ <description>Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy. </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.support.append</name>
+ <value>true</value>
+ <description>to enable dfs append</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>false</value>
+ <description>to enable webhdfs</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.datanode.failed.volumes.tolerated</name>
+ <value>0</value>
+ <description>#of failed disks dn would tolerate</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.block.local-path-access.user</name>
+ <value>hbase</value>
+ <description>the user who is allowed to perform short
+ circuit reads.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.data.dir</name>
+ <value>/mnt/hmc/hadoop/hdfs/data</value>
+ <description>Determines where on the local filesystem a DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.hosts.exclude</name>
+ <value>/etc/hadoop/conf/dfs.exclude</value>
+ <description>Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.</description>
+ </property>
+
+ <property>
+ <name>dfs.hosts</name>
+ <value>/etc/hadoop/conf/dfs.include</value>
+ <description>Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.</description>
+ </property>
+
+ <property>
+ <name>dfs.replication.max</name>
+ <value>50</value>
+ <description>Maximal block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.replication</name>
+ <value>3</value>
+ <description>Default block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.heartbeat.interval</name>
+ <value>3</value>
+ <description>Determines datanode heartbeat interval in seconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.safemode.threshold.pct</name>
+ <value>1.0f</value>
+ <description>
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>6250000</value>
+ <description>
+ Specifies the maximum amount of bandwidth that each datanode
+ can utilize for the balancing purpose in terms of
+ the number of bytes per second.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.address</name>
+ <value>0.0.0.0:50010</value>
+ <description>Address where the datanode binds</description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.http.address</name>
+ <value>0.0.0.0:50075</value>
+ <description>HTTP address for the datanode</description>
+ </property>
+
+ <property>
+ <name>dfs.block.size</name>
+ <value>134217728</value>
+ <description>The default block size for new files.</description>
+ </property>
+
+ <property>
+ <name>dfs.http.address</name>
+ <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The name of the default file system. Either the
+literal string "local" or a host:port for NDFS.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.datanode.du.pct</name>
+<value>0.85f</value>
+<description>When calculating remaining space, only use this percentage of the real available space
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+<description>The max response size for IPC</description>
+</property>
+
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+ <description>
+ Kerberos principal name for the secondary NameNode.
+ </description>
+ </property>
+
+
+<!--
+ This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+ <property>
+ <name>dfs.namenode.kerberos.https.principal</name>
+ <value>host/_HOST@</value>
+ <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.kerberos.https.principal</name>
+ <value>host/_HOST@</value>
+ <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <!-- cluster variant -->
+ <name>dfs.secondary.http.address</name>
+ <value>hdp2.cybervisiontech.com.ua:50090</value>
+ <description>Address of secondary namenode web server</description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.https.port</name>
+ <value>50490</value>
+ <description>The https port where secondary-namenode binds</description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.principal</name>
+ <value>HTTP/_HOST@</value>
+ <description>
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+ HTTP SPNEGO specification.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.keytab</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.kerberos.principal</name>
+ <value>dn/_HOST@</value>
+ <description>
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.keytab.file</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.keytab.file</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.keytab.file</name>
+ <value>/dn.service.keytab</value>
+ <description>
+ The filename of the keytab file for the DataNode.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.https.port</name>
+ <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.https.address</name>
+ <value>hdp1.cybervisiontech.com.ua:50470</value>
+ <description>The https address where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.datanode.data.dir.perm</name>
+ <value>750</value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+ </property>
+
+ <property>
+ <name>dfs.access.time.precision</name>
+ <value>0</value>
+ <description>The access time for HDFS file is precise up to this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+ </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+</property>
+
+<property>
+ <name>ipc.server.read.threadpool.size</name>
+ <value>5</value>
+ <description>IPC thread size</description>
+</property>
+
+</configuration>
Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/metainfo.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/metainfo.xml?rev=1406489&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/metainfo.xml (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.1/services/MAPREDUCE/metainfo.xml Wed Nov 7 08:13:12 2012
@@ -0,0 +1,21 @@
+<?xml version="1.0"?>
+<metainfo>
+ <user>root</user>
+ <comment>This is comment for Mapred service</comment>
+ <version>1.0</version>
+ <components>
+ <component>
+ <name>JOBTRACKER</name>
+ <category>MASTER</category>
+ </component>
+ <component>
+ <name>TASKTRACKER</name>
+ <category>SLAVE</category>
+ </component>
+ <component>
+ <name>MAPREDUCE_CLIENT</name>
+ <category>CLIENT</category>
+ </component>
+ </components>
+
+</metainfo>
Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/repos/repoinfo.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/repos/repoinfo.xml?rev=1406489&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/repos/repoinfo.xml (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/repos/repoinfo.xml Wed Nov 7 08:13:12 2012
@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<reposinfo>
+ <os type="centos6">
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+ <repoid>HDP-1.1.1.16</repoid>
+ <reponame>HDP</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+ <repoid>HDP-UTILS-1.1.0.15</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <mirrorslist></mirrorslist>
+ </repo>
+ <repo>
+ <baseurl></baseurl>
+ <repoid>epel</repoid>
+ <reponame>epel</reponame>
+ <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+ </repo>
+ </os>
+ <os type="centos5">
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+ <repoid>HDP-1.1.1.16</repoid>
+ <reponame>HDP</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+ <repoid>HDP-UTILS-1.1.0.15</repoid>
+ <reponame>HDP-UTILS</reponame>
+ <mirrorslist></mirrorslist>
+ </repo>
+ <repo>
+ <baseurl></baseurl>
+ <repoid>epel</repoid>
+ <reponame>epel</reponame>
+ <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+ </repo>
+ </os>
+</reposinfo>
Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/configuration/hbase-site.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/configuration/hbase-site.xml?rev=1406489&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/configuration/hbase-site.xml (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/configuration/hbase-site.xml Wed Nov 7 08:13:12 2012
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.regionserver.msginterval</name>
+ <value>1000</value>
+ <description>Interval between messages from the RegionServer to HMaster
+ in milliseconds. Default is 15. Set this value low if you want unit
+ tests to be responsive.
+ </description>
+ </property>
+ <property>
+ <name>hbase.client.pause</name>
+ <value>5000</value>
+ <description>General client pause value. Used mostly as value to wait
+ before running a retry of a failed get, region lookup, etc.</description>
+ </property>
+ <property>
+ <name>hbase.master.meta.thread.rescanfrequency</name>
+ <value>10000</value>
+ <description>How long the HMaster sleeps (in milliseconds) between scans of
+ the root and meta tables.
+ </description>
+ </property>
+ <property>
+ <name>hbase.server.thread.wakefrequency</name>
+ <value>1000</value>
+ <description>Time to sleep in between searches for work (in milliseconds).
+ Used as sleep interval by service threads such as META scanner and log roller.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.handler.count</name>
+ <value>5</value>
+ <description>Count of RPC Server instances spun up on RegionServers
+ Same property is used by the HMaster for count of master handlers.
+ Default is 10.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.lease.period</name>
+ <value>6000</value>
+ <description>Length of time the master will wait before timing out a region
+ server lease. Since region servers report in every second (see above), this
+ value has been reduced so that the master will notice a dead region server
+ sooner. The default is 30 seconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.info.port</name>
+ <value>-1</value>
+ <description>The port for the hbase master web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port</name>
+ <value>-1</value>
+ <description>The port for the hbase regionserver web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port.auto</name>
+ <value>true</value>
+ <description>Info server auto port bind. Enables automatic port
+ search if hbase.regionserver.info.port is already in use.
+ Enabled for testing to run multiple tests on one machine.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.lease.thread.wakefrequency</name>
+ <value>3000</value>
+ <description>The interval between checks for expired region server leases.
+ This value has been reduced due to the other reduced values above so that
+ the master will notice a dead region server sooner. The default is 15 seconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.optionalcacheflushinterval</name>
+ <value>10000</value>
+ <description>
+ Amount of time to wait since the last time a region was flushed before
+ invoking an optional cache flush. Default 60,000.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.safemode</name>
+ <value>false</value>
+ <description>
+ Turn on/off safe mode in region server. Always on for production, always off
+ for tests.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.max.filesize</name>
+ <value>67108864</value>
+ <description>
+ Maximum desired file size for an HRegion. If filesize exceeds
+ value + (value / 2), the HRegion is split in two. Default: 256M.
+
+ Keep the maximum filesize small so we split more often in tests.
+ </description>
+ </property>
+ <property>
+ <name>hadoop.log.dir</name>
+ <value>${user.dir}/../logs</value>
+ </property>
+ <property>
+ <name>hbase.zookeeper.property.clientPort</name>
+ <value>21818</value>
+ <description>Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+ </description>
+ </property>
+</configuration>
Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/metainfo.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/metainfo.xml?rev=1406489&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/metainfo.xml (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HBASE/metainfo.xml Wed Nov 7 08:13:12 2012
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <user>mapred</user>
+ <comment>This is comment for HBASE service</comment>
+ <version>1.0</version>
+
+
+ <components>
+ <component>
+ <name>HBASE_MASTER</name>
+ <category>MASTER</category>
+ </component>
+
+ <component>
+ <name>HBASE_REGIONSERVER</name>
+ <category>SLAVE</category>
+ </component>
+
+ <component>
+ <name>HBASE_CLIENT</name>
+ <category>CLIENT</category>
+ </component>
+ </components>
+
+
+</metainfo>
Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hbase-site.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hbase-site.xml?rev=1406489&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hbase-site.xml (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hbase-site.xml Wed Nov 7 08:13:12 2012
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.regionserver.msginterval</name>
+ <value>1000</value>
+ <description>Interval between messages from the RegionServer to HMaster
+ in milliseconds. Default is 15. Set this value low if you want unit
+ tests to be responsive.
+ </description>
+ </property>
+ <property>
+ <name>hbase.client.pause</name>
+ <value>5000</value>
+ <description>General client pause value. Used mostly as value to wait
+ before running a retry of a failed get, region lookup, etc.</description>
+ </property>
+ <property>
+ <name>hbase.master.meta.thread.rescanfrequency</name>
+ <value>10000</value>
+ <description>How long the HMaster sleeps (in milliseconds) between scans of
+ the root and meta tables.
+ </description>
+ </property>
+ <property>
+ <name>hbase.server.thread.wakefrequency</name>
+ <value>1000</value>
+ <description>Time to sleep in between searches for work (in milliseconds).
+ Used as sleep interval by service threads such as META scanner and log roller.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.handler.count</name>
+ <value>5</value>
+ <description>Count of RPC Server instances spun up on RegionServers
+ Same property is used by the HMaster for count of master handlers.
+ Default is 10.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.lease.period</name>
+ <value>6000</value>
+ <description>Length of time the master will wait before timing out a region
+ server lease. Since region servers report in every second (see above), this
+ value has been reduced so that the master will notice a dead region server
+ sooner. The default is 30 seconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.info.port</name>
+ <value>-1</value>
+ <description>The port for the hbase master web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port</name>
+ <value>-1</value>
+ <description>The port for the hbase regionserver web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port.auto</name>
+ <value>true</value>
+ <description>Info server auto port bind. Enables automatic port
+ search if hbase.regionserver.info.port is already in use.
+ Enabled for testing to run multiple tests on one machine.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.lease.thread.wakefrequency</name>
+ <value>3000</value>
+ <description>The interval between checks for expired region server leases.
+ This value has been reduced due to the other reduced values above so that
+ the master will notice a dead region server sooner. The default is 15 seconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.optionalcacheflushinterval</name>
+ <value>10000</value>
+ <description>
+ Amount of time to wait since the last time a region was flushed before
+ invoking an optional cache flush. Default 60,000.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.safemode</name>
+ <value>false</value>
+ <description>
+ Turn on/off safe mode in region server. Always on for production, always off
+ for tests.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.max.filesize</name>
+ <value>67108864</value>
+ <description>
+ Maximum desired file size for an HRegion. If filesize exceeds
+ value + (value / 2), the HRegion is split in two. Default: 256M.
+
+ Keep the maximum filesize small so we split more often in tests.
+ </description>
+ </property>
+ <property>
+ <name>hadoop.log.dir</name>
+ <value>${user.dir}/../logs</value>
+ </property>
+ <property>
+ <name>hbase.zookeeper.property.clientPort</name>
+ <value>21818</value>
+ <description>Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+ </description>
+ </property>
+</configuration>
Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hdfs-site.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hdfs-site.xml?rev=1406489&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hdfs-site.xml (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hdfs-site.xml Wed Nov 7 08:13:12 2012
@@ -0,0 +1,403 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+ <property>
+ <name>dfs.name.dir</name>
+ <!-- cluster variant -->
+ <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+ <description>Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy. </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.support.append</name>
+ <value>true</value>
+ <description>to enable dfs append</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>false</value>
+ <description>to enable webhdfs</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.datanode.failed.volumes.tolerated</name>
+ <value>0</value>
+ <description>#of failed disks dn would tolerate</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.block.local-path-access.user</name>
+ <value>hbase</value>
+ <description>the user who is allowed to perform short
+ circuit reads.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.data.dir</name>
+ <value>/mnt/hmc/hadoop/hdfs/data</value>
+ <description>Determines where on the local filesystem an DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.hosts.exclude</name>
+ <value>/etc/hadoop/conf/dfs.exclude</value>
+ <description>Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.</description>
+ </property>
+
+ <property>
+ <name>dfs.hosts</name>
+ <value>/etc/hadoop/conf/dfs.include</value>
+ <description>Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.</description>
+ </property>
+
+ <property>
+ <name>dfs.replication.max</name>
+ <value>50</value>
+ <description>Maximal block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.replication</name>
+ <value>3</value>
+ <description>Default block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.heartbeat.interval</name>
+ <value>3</value>
+ <description>Determines datanode heartbeat interval in seconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.safemode.threshold.pct</name>
+ <value>1.0f</value>
+ <description>
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>6250000</value>
+ <description>
+ Specifies the maximum amount of bandwidth that each datanode
+ can utilize for the balancing purpose in term of
+ the number of bytes per second.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.address</name>
+ <value>0.0.0.0:50010</value>
+ </property>
+
+ <property>
+ <name>dfs.datanode.http.address</name>
+ <value>0.0.0.0:50075</value>
+ </property>
+
+ <property>
+ <name>dfs.block.size</name>
+ <value>134217728</value>
+ <description>The default block size for new files.</description>
+ </property>
+
+ <property>
+ <name>dfs.http.address</name>
+ <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The address and the base port where the dfs namenode
+web ui will listen on.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.datanode.du.pct</name>
+<value>0.85f</value>
+<description>When calculating remaining space, only use this percentage of the real available space
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+ <description>
+ Kerberos principal name for the secondary NameNode.
+ </description>
+ </property>
+
+
+<!--
+ This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+ <property>
+ <name>dfs.namenode.kerberos.https.principal</name>
+ <value>host/_HOST@</value>
+ <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.kerberos.https.principal</name>
+ <value>host/_HOST@</value>
+ <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <!-- cluster variant -->
+ <name>dfs.secondary.http.address</name>
+ <value>hdp2.cybervisiontech.com.ua:50090</value>
+ <description>Address of secondary namenode web server</description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.https.port</name>
+ <value>50490</value>
+ <description>The https port where secondary-namenode binds</description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.principal</name>
+ <value>HTTP/_HOST@</value>
+ <description>
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+ HTTP SPNEGO specification.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.keytab</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.kerberos.principal</name>
+ <value>dn/_HOST@</value>
+ <description>
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.keytab.file</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.keytab.file</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.keytab.file</name>
+ <value>/dn.service.keytab</value>
+ <description>
+ The filename of the keytab file for the DataNode.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.https.port</name>
+ <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.https.address</name>
+ <value>hdp1.cybervisiontech.com.ua:50470</value>
+ <description>The https address where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.datanode.data.dir.perm</name>
+ <value>750</value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+ </property>
+
+ <property>
+ <name>dfs.access.time.precision</name>
+ <value>0</value>
+ <description>The access time for HDFS file is precise up to this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+ </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+</property>
+
+<property>
+ <name>ipc.server.read.threadpool.size</name>
+ <value>5</value>
+ <description></description>
+</property>
+
+</configuration>
Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/metainfo.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/metainfo.xml?rev=1406489&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/metainfo.xml (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/metainfo.xml Wed Nov 7 08:13:12 2012
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<metainfo>
+ <user>root</user>
+ <comment>This is comment for HDFS service</comment>
+ <version>1.0</version>
+
+
+ <components>
+ <component>
+ <name>NAMENODE</name>
+ <category>MASTER</category>
+ </component>
+
+ <component>
+ <name>DATANODE</name>
+ <category>SLAVE</category>
+ </component>
+
+ <component>
+ <name>HDFS_CLIENT</name>
+ <category>CLIENT</category>
+ </component>
+ </components>
+
+
+</metainfo>
Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HIVE/metainfo.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HIVE/metainfo.xml?rev=1406489&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HIVE/metainfo.xml (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/HIVE/metainfo.xml Wed Nov 7 08:13:12 2012
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <user>root</user>
+ <comment>This is comment for HIVE service</comment>
+ <version>1.0</version>
+
+
+ <components>
+ <component>
+ <name>HIVE_SERVER</name>
+ <category>MASTER</category>
+ </component>
+
+ <component>
+ <name>MYSQL_SERVER</name>
+ <category>MASTER</category>
+ </component>
+
+ <component>
+ <name>HIVE_CLIENT</name>
+ <category>CLIENT</category>
+ </component>
+ </components>
+
+
+</metainfo>
Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hbase-site.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hbase-site.xml?rev=1406489&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hbase-site.xml (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hbase-site.xml Wed Nov 7 08:13:12 2012
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+ <property>
+ <name>hbase.regionserver.msginterval</name>
+ <value>1000</value>
+ <description>Interval between messages from the RegionServer to HMaster
+ in milliseconds. Default is 15. Set this value low if you want unit
+ tests to be responsive.
+ </description>
+ </property>
+ <property>
+ <name>hbase.client.pause</name>
+ <value>5000</value>
+ <description>General client pause value. Used mostly as value to wait
+ before running a retry of a failed get, region lookup, etc.</description>
+ </property>
+ <property>
+ <name>hbase.master.meta.thread.rescanfrequency</name>
+ <value>10000</value>
+ <description>How long the HMaster sleeps (in milliseconds) between scans of
+ the root and meta tables.
+ </description>
+ </property>
+ <property>
+ <name>hbase.server.thread.wakefrequency</name>
+ <value>1000</value>
+ <description>Time to sleep in between searches for work (in milliseconds).
+ Used as sleep interval by service threads such as META scanner and log roller.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.handler.count</name>
+ <value>5</value>
+ <description>Count of RPC Server instances spun up on RegionServers
+ Same property is used by the HMaster for count of master handlers.
+ Default is 10.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.lease.period</name>
+ <value>6000</value>
+ <description>Length of time the master will wait before timing out a region
+ server lease. Since region servers report in every second (see above), this
+ value has been reduced so that the master will notice a dead region server
+ sooner. The default is 30 seconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.info.port</name>
+ <value>-1</value>
+ <description>The port for the hbase master web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port</name>
+ <value>-1</value>
+ <description>The port for the hbase regionserver web UI
+ Set to -1 if you do not want the info server to run.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.info.port.auto</name>
+ <value>true</value>
+ <description>Info server auto port bind. Enables automatic port
+ search if hbase.regionserver.info.port is already in use.
+ Enabled for testing to run multiple tests on one machine.
+ </description>
+ </property>
+ <property>
+ <name>hbase.master.lease.thread.wakefrequency</name>
+ <value>3000</value>
+ <description>The interval between checks for expired region server leases.
+ This value has been reduced due to the other reduced values above so that
+ the master will notice a dead region server sooner. The default is 15 seconds.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.optionalcacheflushinterval</name>
+ <value>10000</value>
+ <description>
+ Amount of time to wait since the last time a region was flushed before
+ invoking an optional cache flush. Default 60,000.
+ </description>
+ </property>
+ <property>
+ <name>hbase.regionserver.safemode</name>
+ <value>false</value>
+ <description>
+ Turn on/off safe mode in region server. Always on for production, always off
+ for tests.
+ </description>
+ </property>
+ <property>
+ <name>hbase.hregion.max.filesize</name>
+ <value>67108864</value>
+ <description>
+ Maximum desired file size for an HRegion. If filesize exceeds
+ value + (value / 2), the HRegion is split in two. Default: 256M.
+
+ Keep the maximum filesize small so we split more often in tests.
+ </description>
+ </property>
+ <property>
+ <name>hadoop.log.dir</name>
+ <value>${user.dir}/../logs</value>
+ </property>
+ <property>
+ <name>hbase.zookeeper.property.clientPort</name>
+ <value>21818</value>
+ <description>Property from ZooKeeper's config zoo.cfg.
+ The port at which the clients will connect.
+ </description>
+ </property>
+</configuration>
Added: incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hdfs-site.xml
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hdfs-site.xml?rev=1406489&view=auto
==============================================================================
--- incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hdfs-site.xml (added)
+++ incubator/ambari/branches/AMBARI-666/ambari-server/src/test/resources/stacks/HDP/0.2/services/MAPREDUCE/configuration/hdfs-site.xml Wed Nov 7 08:13:12 2012
@@ -0,0 +1,403 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+ <property>
+ <name>dfs.name.dir</name>
+ <!-- cluster variant -->
+ <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+ <description>Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy. </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.support.append</name>
+ <value>true</value>
+ <description>to enable dfs append</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>false</value>
+ <description>to enable webhdfs</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.datanode.failed.volumes.tolerated</name>
+ <value>0</value>
+ <description>#of failed disks dn would tolerate</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.block.local-path-access.user</name>
+ <value>hbase</value>
+ <description>the user who is allowed to perform short
+ circuit reads.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.data.dir</name>
+ <value>/mnt/hmc/hadoop/hdfs/data</value>
+ <description>Determines where on the local filesystem a DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.hosts.exclude</name>
+ <value>/etc/hadoop/conf/dfs.exclude</value>
+ <description>Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.</description>
+ </property>
+
+ <property>
+ <name>dfs.hosts</name>
+ <value>/etc/hadoop/conf/dfs.include</value>
+ <description>Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.</description>
+ </property>
+
+ <property>
+ <name>dfs.replication.max</name>
+ <value>50</value>
+ <description>Maximal block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.replication</name>
+ <value>3</value>
+ <description>Default block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.heartbeat.interval</name>
+ <value>3</value>
+ <description>Determines datanode heartbeat interval in seconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.safemode.threshold.pct</name>
+ <value>1.0f</value>
+ <description>
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>6250000</value>
+ <description>
+ Specifies the maximum amount of bandwidth that each datanode
+ can utilize for the balancing purpose in terms of
+ the number of bytes per second.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.address</name>
+ <value>0.0.0.0:50010</value>
+ </property>
+
+ <property>
+ <name>dfs.datanode.http.address</name>
+ <value>0.0.0.0:50075</value>
+ </property>
+
+ <property>
+ <name>dfs.block.size</name>
+ <value>134217728</value>
+ <description>The default block size for new files.</description>
+ </property>
+
+ <property>
+ <name>dfs.http.address</name>
+ <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The address and the base port (host:port) on which the
+NameNode web UI listens.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.datanode.du.pct</name>
+<value>0.85f</value>
+<description>When calculating remaining space, only use this percentage of the real available space
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<!-- NOTE(review): duplicates dfs.namenode.handler.count declared earlier in this
+     file (value 40); Hadoop Configuration keeps the last definition, so 100 wins. -->
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+ <description>
+ Kerberos principal name for the secondary NameNode.
+ </description>
+ </property>
+
+
+<!--
+ This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+ <property>
+ <name>dfs.namenode.kerberos.https.principal</name>
+ <value>host/_HOST@</value>
+ <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.kerberos.https.principal</name>
+ <value>host/_HOST@</value>
+ <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <!-- cluster variant -->
+ <name>dfs.secondary.http.address</name>
+ <value>hdp2.cybervisiontech.com.ua:50090</value>
+ <description>Address of secondary namenode web server</description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.https.port</name>
+ <value>50490</value>
+ <description>The https port where secondary-namenode binds</description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.principal</name>
+ <value>HTTP/_HOST@</value>
+ <description>
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+ HTTP SPNEGO specification.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.keytab</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.kerberos.principal</name>
+ <value>dn/_HOST@</value>
+ <description>
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.keytab.file</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.keytab.file</name>
+ <value>/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.keytab.file</name>
+ <value>/dn.service.keytab</value>
+ <description>
+ The filename of the keytab file for the DataNode.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.https.port</name>
+ <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.https.address</name>
+ <value>hdp1.cybervisiontech.com.ua:50470</value>
+ <description>The https address where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.datanode.data.dir.perm</name>
+ <value>750</value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+ </property>
+
+ <property>
+ <name>dfs.access.time.precision</name>
+ <value>0</value>
+ <description>The access time for an HDFS file is precise up to this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+ </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+</property>
+
+<property>
+ <name>ipc.server.read.threadpool.size</name>
+ <value>5</value>
+ <description></description>
+</property>
+
+</configuration>