You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by al...@apache.org on 2017/06/28 00:24:37 UTC
[39/51] [partial] ambari git commit: AMBARI-21349. Create BigInsights
Stack Skeleton in Ambari 2.5 (alejandro)
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml
new file mode 100755
index 0000000..fc510fa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,606 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+
+ <!-- file system properties -->
+
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <!-- cluster variant -->
+ <value>/hadoop/hdfs/namenode</value>
+ <description>Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy. </description>
+ <display-name>NameNode directories</display-name>
+ <final>true</final>
+ <value-attributes>
+ <type>directories</type>
+ <overridable>false</overridable>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.support.append</name>
+ <value>true</value>
+ <description>to enable dfs append</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>true</value>
+ <display-name>WebHDFS enabled</display-name>
+ <description>Whether to enable WebHDFS feature</description>
+ <final>true</final>
+ <value-attributes>
+ <type>boolean</type>
+ <overridable>false</overridable>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.datanode.failed.volumes.tolerated</name>
+ <value>0</value>
+ <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
+ <final>true</final>
+ <display-name>DataNode failed disk tolerance</display-name>
+ <value-attributes>
+ <type>int</type>
+ <minimum>0</minimum>
+ <maximum>2</maximum>
+ <increment-step>1</increment-step>
+ </value-attributes>
+ <depends-on>
+ <property>
+ <type>hdfs-site</type>
+ <name>dfs.datanode.data.dir</name>
+ </property>
+ </depends-on>
+ </property>
+
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>/hadoop/hdfs/data</value>
+ <display-name>DataNode directories</display-name>
+  <description>Determines where on the local filesystem a DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+ </description>
+ <final>true</final>
+ <value-attributes>
+ <type>directories</type>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.hosts.exclude</name>
+ <value>/etc/hadoop/conf/dfs.exclude</value>
+ <description>Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.</description>
+ </property>
+
+ <!--
+ <property>
+ <name>dfs.hosts</name>
+ <value>/etc/hadoop/conf/dfs.include</value>
+ <description>Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.</description>
+ </property>
+ -->
+
+ <property>
+ <name>dfs.namenode.checkpoint.dir</name>
+ <value>/hadoop/hdfs/namesecondary</value>
+ <display-name>SecondaryNameNode Checkpoint directories</display-name>
+ <description>Determines where on the local filesystem the DFS secondary
+ name node should store the temporary images to merge.
+ If this is a comma-delimited list of directories then the image is
+ replicated in all of the directories for redundancy.
+ </description>
+ <value-attributes>
+ <type>directories</type>
+ <overridable>false</overridable>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.namenode.checkpoint.edits.dir</name>
+ <value>${dfs.namenode.checkpoint.dir}</value>
+ <description>Determines where on the local filesystem the DFS secondary
+ name node should store the temporary edits to merge.
+      If this is a comma-delimited list of directories then the edits are
+      replicated in all of the directories for redundancy.
+ Default value is same as dfs.namenode.checkpoint.dir
+ </description>
+ </property>
+
+
+ <property>
+ <name>dfs.namenode.checkpoint.period</name>
+ <value>21600</value>
+ <display-name>HDFS Maximum Checkpoint Delay</display-name>
+ <description>The number of seconds between two periodic checkpoints.</description>
+ <value-attributes>
+ <type>int</type>
+ <unit>seconds</unit>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.namenode.checkpoint.txns</name>
+ <value>1000000</value>
+ <description>The Secondary NameNode or CheckpointNode will create a checkpoint
+ of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
+ regardless of whether 'dfs.namenode.checkpoint.period' has expired.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.replication.max</name>
+ <value>50</value>
+ <description>Maximal block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.replication</name>
+ <value>3</value>
+ <description>Default block replication.</description>
+ <display-name>Block replication</display-name>
+ <value-attributes>
+ <type>int</type>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.heartbeat.interval</name>
+ <value>3</value>
+ <description>Determines datanode heartbeat interval in seconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.heartbeat.interval</name>
+ <value>3</value>
+ <description>Determines datanode heartbeat interval in seconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.safemode.threshold-pct</name>
+ <value>0.999</value>
+ <description>
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.namenode.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+ </description>
+ <display-name>Minimum replicated blocks %</display-name>
+ <value-attributes>
+ <type>float</type>
+ <minimum>0.990</minimum>
+ <maximum>1.000</maximum>
+ <increment-step>0.001</increment-step>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.datanode.balance.bandwidthPerSec</name>
+ <value>6250000</value>
+ <description>
+ Specifies the maximum amount of bandwidth that each datanode
+      can utilize for the balancing purpose in terms of
+ the number of bytes per second.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.https.port</name>
+ <value>50470</value>
+ <description>
+ This property is used by HftpFileSystem.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.address</name>
+ <value>0.0.0.0:50010</value>
+ <description>
+ The datanode server address and port for data transfer.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.http.address</name>
+ <value>0.0.0.0:50075</value>
+ <description>
+ The datanode http server address and port.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.https.address</name>
+ <value>0.0.0.0:50475</value>
+ <description>
+ The datanode https server address and port.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.blocksize</name>
+ <value>134217728</value>
+ <description>The default block size for new files.</description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.http-address</name>
+ <value>localhost:50070</value>
+ <description>The name of the default file system. Either the
+ literal string "local" or a host:port for NDFS.</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.datanode.du.reserved</name>
+ <!-- cluster variant -->
+ <value>1073741824</value>
+ <display-name>Reserved space for HDFS</display-name>
+ <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.</description>
+ <value-attributes>
+ <type>int</type>
+ <unit>bytes</unit>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.datanode.ipc.address</name>
+ <value>0.0.0.0:8010</value>
+ <description>
+ The datanode ipc server address and port.
+ If the port is 0 then the server will start on a free port.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.blockreport.initialDelay</name>
+ <value>120</value>
+ <description>Delay for first block report in seconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.max.transfer.threads</name>
+ <value>8192</value>
+ <description>Specifies the maximum number of threads to use for transferring data in and out of the datanode.</description>
+ <display-name>DataNode max data transfer threads</display-name>
+ <value-attributes>
+ <type>int</type>
+ <minimum>0</minimum>
+ <maximum>48000</maximum>
+ </value-attributes>
+ </property>
+
+ <!-- Permissions configuration -->
+
+ <property>
+ <name>fs.permissions.umask-mode</name>
+ <value>022</value>
+ <description>
+ The octal umask used when creating files and directories.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.permissions.enabled</name>
+ <value>true</value>
+ <description>
+ If "true", enable permission checking in HDFS.
+ If "false", permission checking is turned off,
+ but all other behavior is unchanged.
+ Switching from one parameter value to the other does not change the mode,
+ owner or group of files or directories.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.permissions.superusergroup</name>
+ <value>hdfs</value>
+ <description>The name of the group of super-users.</description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>64</value>
+ <description>Added to grow Queue size so that more client connections are allowed</description>
+ <display-name>NameNode Server threads</display-name>
+ <value-attributes>
+ <type>int</type>
+ <minimum>1</minimum>
+ <maximum>200</maximum>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.block.access.token.enable</name>
+ <value>true</value>
+ <description>
+ If "true", access tokens are used as capabilities for accessing datanodes.
+ If "false", no access tokens are checked on accessing datanodes.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.kerberos.principal</name>
+ <value>nn/_HOST@EXAMPLE.COM</value>
+ <description>
+ Kerberos principal name for the NameNode
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.kerberos.principal</name>
+ <value>nn/_HOST@EXAMPLE.COM</value>
+ <description>
+ Kerberos principal name for the secondary NameNode.
+ </description>
+ </property>
+
+
+ <!--
+ This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+ -->
+ <property>
+ <name>dfs.namenode.kerberos.https.principal</name>
+ <value>HTTP/_HOST@EXAMPLE.COM</value>
+ <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.kerberos.https.principal</name>
+ <value>HTTP/_HOST@EXAMPLE.COM</value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+ </property>
+
+ <property>
+ <!-- cluster variant -->
+ <name>dfs.namenode.secondary.http-address</name>
+ <value>localhost:50090</value>
+ <description>Address of secondary namenode web server</description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.principal</name>
+ <value>HTTP/_HOST@EXAMPLE.COM</value>
+ <description>
+ The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.web.authentication.kerberos.keytab</name>
+ <value>/etc/security/keytabs/spnego.service.keytab</value>
+ <description>
+ The Kerberos keytab file with the credentials for the
+ HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.kerberos.principal</name>
+ <value>dn/_HOST@EXAMPLE.COM</value>
+ <description>
+ The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.keytab.file</name>
+ <value>/etc/security/keytabs/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.secondary.namenode.keytab.file</name>
+ <value>/etc/security/keytabs/nn.service.keytab</value>
+ <description>
+ Combined keytab file containing the namenode service and host principals.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.keytab.file</name>
+ <value>/etc/security/keytabs/dn.service.keytab</value>
+ <description>
+ The filename of the keytab file for the DataNode.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.https-address</name>
+ <value>localhost:50470</value>
+ <description>The https address where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.datanode.data.dir.perm</name>
+ <value>750</value>
+ <display-name>DataNode directories permission</display-name>
+ <description>The permissions that should be there on dfs.datanode.data.dir
+ directories. The datanode will not come up if the permissions are
+ different on existing dfs.datanode.data.dir directories. If the directories
+ don't exist, they will be created with this permission.</description>
+ <value-attributes>
+ <type>int</type>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.namenode.accesstime.precision</name>
+ <value>3600000</value>
+ <display-name>Access time precision</display-name>
+    <description>The access time for HDFS file is precise up to this value.
+ The default value is 1 hour. Setting a value of 0 disables
+ access times for HDFS.
+ </description>
+ <value-attributes>
+ <type>int</type>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+ <value-attributes>
+ <visible>true</visible>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.namenode.avoid.read.stale.datanode</name>
+ <value>true</value>
+ <description>
+ Indicate whether or not to avoid reading from stale datanodes whose
+ heartbeat messages have not been received by the namenode for more than a
+ specified time interval.
+ </description>
+ </property>
+ <property>
+ <name>dfs.namenode.avoid.write.stale.datanode</name>
+ <value>true</value>
+ <description>
+ Indicate whether or not to avoid writing to stale datanodes whose
+ heartbeat messages have not been received by the namenode for more than a
+ specified time interval.
+ </description>
+ </property>
+ <property>
+ <name>dfs.namenode.write.stale.datanode.ratio</name>
+ <value>1.0f</value>
+ <description>When the ratio of number stale datanodes to total datanodes marked is greater
+ than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
+ </description>
+ </property>
+ <property>
+ <name>dfs.namenode.stale.datanode.interval</name>
+ <value>30000</value>
+ <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
+ </property>
+
+ <property>
+ <name>dfs.journalnode.http-address</name>
+ <value>0.0.0.0:8480</value>
+ <description>The address and port the JournalNode web UI listens on.
+ If the port is 0 then the server will start on a free port. </description>
+ </property>
+
+ <property>
+ <name>dfs.journalnode.edits.dir</name>
+ <value>/grid/0/hdfs/journal</value>
+ <description>The path where the JournalNode daemon will store its local state. </description>
+ </property>
+
+ <!-- HDFS Short-Circuit Local Reads -->
+
+ <property>
+ <name>dfs.client.read.shortcircuit</name>
+ <value>true</value>
+ <description>This configuration parameter turns on short-circuit local reads.</description>
+ <display-name>HDFS Short-circuit read</display-name>
+ <value-attributes>
+ <type>boolean</type>
+ </value-attributes>
+ </property>
+
+ <property>
+ <name>dfs.domain.socket.path</name>
+ <value>/var/lib/hadoop-hdfs/dn_socket</value>
+ <description>
+ This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
+ If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.client.read.shortcircuit.streams.cache.size</name>
+ <value>4096</value>
+ <description>
+ The DFSClient maintains a cache of recently opened file descriptors. This
+ parameter controls the size of that cache. Setting this higher will use
+ more file descriptors, but potentially provide better performance on
+ workloads involving lots of seeks.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.name.dir.restore</name>
+ <value>true</value>
+ <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
+ When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.handler.count</name>
+ <value>40</value>
+ </property>
+
+ <property>
+ <name>dfs.namenode.acls.enabled</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>dfs.client.file-block-storage-locations.timeout.millis</name>
+ <value>3000</value>
+ </property>
+
+ <property>
+ <name>dfs.client.mmap.enabled</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>dfs.datanode.max.locked.memory</name>
+ <value>0</value>
+ </property>
+
+ <property>
+ <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.http.policy</name>
+ <value>HTTP_ONLY</value>
+ </property>
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml
new file mode 100755
index 0000000..4513fdd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-client.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+ <property>
+ <name>ssl.client.truststore.location</name>
+ <value>/etc/security/clientKeys/all.jks</value>
+ <description>Location of the trust store file.</description>
+ </property>
+ <property>
+ <name>ssl.client.truststore.type</name>
+ <value>jks</value>
+ <description>Optional. Default value is "jks".</description>
+ </property>
+ <property>
+ <name>ssl.client.truststore.password</name>
+ <value>bigdata</value>
+ <property-type>PASSWORD</property-type>
+ <description>Password to open the trust store file.</description>
+ </property>
+ <property>
+ <name>ssl.client.truststore.reload.interval</name>
+ <value>10000</value>
+ <description>Truststore reload interval, in milliseconds.</description>
+ </property>
+ <property>
+ <name>ssl.client.keystore.type</name>
+ <value>jks</value>
+ <description>Optional. Default value is "jks".</description>
+ </property>
+ <property>
+ <name>ssl.client.keystore.location</name>
+ <value>/etc/security/clientKeys/keystore.jks</value>
+ <description>Location of the keystore file.</description>
+ </property>
+ <property>
+ <name>ssl.client.keystore.password</name>
+ <value>bigdata</value>
+ <property-type>PASSWORD</property-type>
+ <description>Password to open the keystore file.</description>
+ </property>
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml
new file mode 100755
index 0000000..f95793e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/configuration/ssl-server.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+ <property>
+ <name>ssl.server.truststore.location</name>
+ <value>/etc/security/serverKeys/all.jks</value>
+ <description>Location of the trust store file.</description>
+ </property>
+ <property>
+ <name>ssl.server.truststore.type</name>
+ <value>jks</value>
+ <description>Optional. Default value is "jks".</description>
+ </property>
+ <property>
+ <name>ssl.server.truststore.password</name>
+ <value>bigdata</value>
+ <property-type>PASSWORD</property-type>
+ <description>Password to open the trust store file.</description>
+ </property>
+ <property>
+ <name>ssl.server.truststore.reload.interval</name>
+ <value>10000</value>
+ <description>Truststore reload interval, in milliseconds.</description>
+ </property>
+ <property>
+ <name>ssl.server.keystore.type</name>
+ <value>jks</value>
+ <description>Optional. Default value is "jks".</description>
+ </property>
+ <property>
+ <name>ssl.server.keystore.location</name>
+ <value>/etc/security/serverKeys/keystore.jks</value>
+ <description>Location of the keystore file.</description>
+ </property>
+ <property>
+ <name>ssl.server.keystore.password</name>
+ <value>bigdata</value>
+ <property-type>PASSWORD</property-type>
+ <description>Password to open the keystore file.</description>
+ </property>
+ <property>
+ <name>ssl.server.keystore.keypassword</name>
+ <value>bigdata</value>
+ <property-type>PASSWORD</property-type>
+ <description>Password for private key in keystore file.</description>
+ </property>
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/kerberos.json
new file mode 100755
index 0000000..2d1674b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/kerberos.json
@@ -0,0 +1,242 @@
+{
+ "services": [
+ {
+ "name": "HDFS",
+ "identities": [
+ {
+ "name": "/spnego",
+ "principal": {
+ "configuration": "hdfs-site/dfs.web.authentication.kerberos.principal"
+ },
+ "keytab": {
+ "configuration": "hdfs-site/dfs.web.authentication.kerberos.keytab"
+ }
+ },
+ {
+ "name": "/smokeuser"
+ },
+ {
+ "name": "/hdfs"
+ }
+ ],
+ "auth_to_local_properties" : [
+ "core-site/hadoop.security.auth_to_local"
+ ],
+ "configurations": [
+ {
+ "core-site": {
+ "hadoop.security.authentication": "kerberos",
+ "hadoop.rpc.protection": "authentication",
+ "hadoop.security.authorization": "true",
+ "hadoop.security.auth_to_local": "",
+ "hadoop.http.authentication.kerberos.name.rules": "",
+ "hadoop.http.filter.initializers": "",
+ "hadoop.http.authentication.type": "simple",
+ "hadoop.http.authentication.signature.secret": "",
+ "hadoop.http.authentication.signature.secret.file": "",
+ "hadoop.http.authentication.signer.secret.provider": "",
+ "hadoop.http.authentication.signer.secret.provider.object": "",
+ "hadoop.http.authentication.token.validity": "",
+ "hadoop.http.authentication.cookie.domain": "",
+ "hadoop.http.authentication.cookie.path": "",
+ "hadoop.proxyuser.HTTP.groups": "${hadoop-env/proxyuser_group}"
+ }
+ }
+ ],
+ "components": [
+ {
+ "name": "HDFS_CLIENT",
+ "identities": [
+ {
+ "name": "/HDFS/NAMENODE/hdfs"
+ }
+ ]
+ },
+ {
+ "name": "NAMENODE",
+ "identities": [
+ {
+ "name": "hdfs",
+ "principal": {
+ "value": "${hadoop-env/hdfs_user}-${cluster_name}@${realm}",
+ "type" : "user" ,
+ "configuration": "hadoop-env/hdfs_principal_name",
+ "local_username" : "${hadoop-env/hdfs_user}"
+ },
+ "keytab": {
+ "file": "${keytab_dir}/hdfs.headless.keytab",
+ "owner": {
+ "name": "${hadoop-env/hdfs_user}",
+ "access": "r"
+ },
+ "group": {
+ "name": "${cluster-env/user_group}",
+ "access": "r"
+ },
+ "configuration": "hadoop-env/hdfs_user_keytab"
+ }
+ },
+ {
+ "name": "namenode_nn",
+ "principal": {
+ "value": "nn/_HOST@${realm}",
+ "type" : "service",
+ "configuration": "hdfs-site/dfs.namenode.kerberos.principal",
+ "local_username" : "${hadoop-env/hdfs_user}"
+ },
+ "keytab": {
+ "file": "${keytab_dir}/nn.service.keytab",
+ "owner": {
+ "name": "${hadoop-env/hdfs_user}",
+ "access": "r"
+ },
+ "group": {
+ "name": "${cluster-env/user_group}",
+ "access": ""
+ },
+ "configuration": "hdfs-site/dfs.namenode.keytab.file"
+ }
+ },
+ {
+ "name": "/spnego",
+ "principal": {
+ "configuration": "hdfs-site/dfs.namenode.kerberos.internal.spnego.principal"
+ }
+ }
+ ],
+ "configurations": [
+ {
+ "hdfs-site": {
+ "dfs.block.access.token.enable": "true"
+ }
+ }
+ ]
+ },
+ {
+ "name": "DATANODE",
+ "identities": [
+ {
+ "name": "datanode_dn",
+ "principal": {
+ "value": "dn/_HOST@${realm}",
+ "type" : "service",
+ "configuration": "hdfs-site/dfs.datanode.kerberos.principal",
+ "local_username" : "${hadoop-env/hdfs_user}"
+ },
+ "keytab": {
+ "file": "${keytab_dir}/dn.service.keytab",
+ "owner": {
+ "name": "${hadoop-env/hdfs_user}",
+ "access": "r"
+ },
+ "group": {
+ "name": "${cluster-env/user_group}",
+ "access": ""
+ },
+ "configuration": "hdfs-site/dfs.datanode.keytab.file"
+ }
+ }
+ ],
+ "configurations" : [
+ {
+ "hdfs-site" : {
+ "dfs.datanode.address" : "0.0.0.0:1019",
+ "dfs.datanode.http.address": "0.0.0.0:1022"
+ }
+ }
+ ]
+ },
+ {
+ "name": "SECONDARY_NAMENODE",
+ "identities": [
+ {
+ "name": "secondary_namenode_nn",
+ "principal": {
+ "value": "nn/_HOST@${realm}",
+ "type" : "service",
+ "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.principal",
+ "local_username" : "${hadoop-env/hdfs_user}"
+ },
+ "keytab": {
+ "file": "${keytab_dir}/nn.service.keytab",
+ "owner": {
+ "name": "${hadoop-env/hdfs_user}",
+ "access": "r"
+ },
+ "group": {
+ "name": "${cluster-env/user_group}",
+ "access": ""
+ },
+ "configuration": "hdfs-site/dfs.secondary.namenode.keytab.file"
+ }
+ },
+ {
+ "name": "/spnego",
+ "principal": {
+ "configuration": "hdfs-site/dfs.secondary.namenode.kerberos.internal.spnego.principal"
+ }
+ }
+ ]
+ },
+ {
+ "name": "NFS_GATEWAY",
+ "identities": [
+ {
+ "name": "nfsgateway",
+ "principal": {
+ "value": "nfs/_HOST@${realm}",
+ "type" : "service",
+ "configuration": "hdfs-site/nfs.kerberos.principal",
+ "local_username" : "${hadoop-env/hdfs_user}"
+ },
+ "keytab": {
+ "file": "${keytab_dir}/nfs.service.keytab",
+ "owner": {
+ "name": "${hadoop-env/hdfs_user}",
+ "access": "r"
+ },
+ "group": {
+ "name": "${cluster-env/user_group}",
+ "access": ""
+ },
+ "configuration": "hdfs-site/nfs.keytab.file"
+ }
+ }
+ ]
+ },
+ {
+ "name": "JOURNALNODE",
+ "identities": [
+ {
+ "name": "journalnode_jn",
+ "principal": {
+ "value": "jn/_HOST@${realm}",
+ "type" : "service",
+ "configuration": "hdfs-site/dfs.journalnode.kerberos.principal",
+ "local_username" : "${hadoop-env/hdfs_user}"
+ },
+ "keytab": {
+ "file": "${keytab_dir}/jn.service.keytab",
+ "owner": {
+ "name": "${hadoop-env/hdfs_user}",
+ "access": "r"
+ },
+ "group": {
+ "name": "${cluster-env/user_group}",
+ "access": ""
+ },
+ "configuration": "hdfs-site/dfs.journalnode.keytab.file"
+ }
+ },
+ {
+ "name": "/spnego",
+ "principal": {
+ "configuration": "hdfs-site/dfs.journalnode.kerberos.internal.spnego.principal"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/1863c3b9/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metainfo.xml
new file mode 100755
index 0000000..918cdb3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.0/services/HDFS/metainfo.xml
@@ -0,0 +1,234 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HDFS</name>
+ <displayName>HDFS</displayName>
+ <comment>Apache Hadoop Distributed File System</comment>
+ <version>2.7.1</version>
+ <!-- Component definitions: each component is managed through the PYTHON command script named in its commandScript element. -->
+ <components>
+ <component>
+ <name>NAMENODE</name>
+ <displayName>NameNode</displayName>
+ <category>MASTER</category>
+ <!-- 1-2: a second NameNode instance is permitted, presumably for an HA active/standby pair (see ZKFC below) - TODO confirm against the stack's HA support. -->
+ <cardinality>1-2</cardinality>
+ <versionAdvertised>true</versionAdvertised>
+ <commandScript>
+ <script>scripts/namenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <customCommands>
+ <customCommand>
+ <name>DECOMMISSION</name>
+ <commandScript>
+ <script>scripts/namenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </customCommand>
+ <customCommand>
+ <name>REBALANCEHDFS</name>
+ <!-- Marked as a background command; note that, unlike DECOMMISSION above, no timeout element is declared for it. -->
+ <background>true</background>
+ <commandScript>
+ <script>scripts/namenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ </customCommand>
+ </customCommands>
+ </component>
+
+ <component>
+ <name>DATANODE</name>
+ <displayName>DataNode</displayName>
+ <category>SLAVE</category>
+ <cardinality>1+</cardinality>
+ <versionAdvertised>true</versionAdvertised>
+ <commandScript>
+ <script>scripts/datanode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>SECONDARY_NAMENODE</name>
+ <displayName>SNameNode</displayName>
+ <!-- TODO: cardinality is conditional on HA usage -->
+ <cardinality>1</cardinality>
+ <versionAdvertised>true</versionAdvertised>
+ <category>MASTER</category>
+ <commandScript>
+ <script>scripts/snamenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>HDFS_CLIENT</name>
+ <displayName>HDFS Client</displayName>
+ <category>CLIENT</category>
+ <cardinality>1+</cardinality>
+ <versionAdvertised>true</versionAdvertised>
+ <commandScript>
+ <script>scripts/hdfs_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <!-- Config files materialized on client hosts; each maps a generated file name to its Ambari configuration dictionary. -->
+ <configFiles>
+ <configFile>
+ <type>xml</type>
+ <fileName>hdfs-site.xml</fileName>
+ <dictionaryName>hdfs-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>xml</type>
+ <fileName>core-site.xml</fileName>
+ <dictionaryName>core-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>log4j.properties</fileName>
+ <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>hadoop-env.sh</fileName>
+ <dictionaryName>hadoop-env</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+
+ <component>
+ <name>JOURNALNODE</name>
+ <displayName>JournalNode</displayName>
+ <category>SLAVE</category>
+ <cardinality>0+</cardinality>
+ <versionAdvertised>true</versionAdvertised>
+ <commandScript>
+ <script>scripts/journalnode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>ZKFC</name>
+ <displayName>ZKFailoverController</displayName>
+ <category>SLAVE</category>
+ <!-- TODO: cardinality is conditional on HA topology -->
+ <cardinality>0+</cardinality>
+ <versionAdvertised>false</versionAdvertised>
+ <commandScript>
+ <script>scripts/zkfc_slave.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+ </components>
+
+ <!-- OS-family-specific package lists; LZO-related packages are gated by the should_install_lzo condition (presumably due to LZO licensing - confirm). -->
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>hadoop</name>
+ </package>
+ <package>
+ <name>hadoop-lzo</name>
+ <condition>should_install_lzo</condition>
+ </package>
+ </packages>
+ </osSpecific>
+
+ <osSpecific>
+ <osFamily>redhat6,suse11</osFamily>
+ <packages>
+ <package>
+ <name>snappy</name>
+ </package>
+ <package>
+ <name>lzo</name>
+ <condition>should_install_lzo</condition>
+ </package>
+ <package>
+ <name>hadoop-lzo-native</name>
+ <condition>should_install_lzo</condition>
+ </package>
+ <package>
+ <name>hadoop-libhdfs</name>
+ </package>
+ <package>
+ <name>ambari-log4j</name>
+ </package>
+ </packages>
+ </osSpecific>
+
+ <osSpecific>
+ <osFamily>ubuntu12</osFamily>
+ <packages>
+ <package>
+ <name>libsnappy1</name>
+ </package>
+ <package>
+ <name>libsnappy-dev</name>
+ </package>
+ <package>
+ <name>liblzo2-2</name>
+ <condition>should_install_lzo</condition>
+ </package>
+ <package>
+ <name>hadoop-hdfs</name>
+ </package>
+ <package>
+ <name>libhdfs0</name>
+ </package>
+ <package>
+ <name>libhdfs0-dev</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ <!-- Service-level command script: runs the HDFS service check (scripts/service_check.py). -->
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <!-- ZOOKEEPER is declared as a required service; presumably needed for ZKFC-based NameNode HA - TODO confirm. -->
+ <requiredServices>
+ <service>ZOOKEEPER</service>
+ </requiredServices>
+
+ <!-- Config types associated with this service; changes to these are tracked against HDFS components. -->
+ <configuration-dependencies>
+ <config-type>core-site</config-type>
+ <config-type>hdfs-site</config-type>
+ <config-type>hadoop-env</config-type>
+ <config-type>hadoop-policy</config-type>
+ <config-type>hdfs-log4j</config-type>
+ <config-type>ssl-client</config-type>
+ <config-type>ssl-server</config-type>
+ </configuration-dependencies>
+ <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
+
+ </service>
+ </services>
+</metainfo>