Posted to commits@ambari.apache.org by ja...@apache.org on 2014/11/25 00:45:37 UTC
[18/24] ambari git commit: AMBARI-7872 Create stack definitions for
PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses,
Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..08822eb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,202 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+ <property>
+ <name>content</name>
+ <description>Custom log4j.properties</description>
+ <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties.
+# To change the daemon root logger, use hadoop_root_logger in hadoop-env.
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Log file size and number of rolled backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+ </value>
+ </property>
+
+</configuration>
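
A note on the template above: the ${...} placeholders are resolved by log4j itself at runtime, with Java system properties (set by the Hadoop daemons via flags such as -Dhadoop.root.logger=INFO,RFA from hadoop-env) taking precedence over the in-file defaults. The following is a minimal Python sketch of that resolution order, for illustration only; the property names come from the template, but the resolve() helper is hypothetical and not part of Ambari or log4j:

    import re

    # Defaults taken from the hdfs-log4j template above.
    props = {
        "hadoop.root.logger": "INFO,console",
        "hadoop.log.dir": ".",
        "hadoop.log.file": "hadoop.log",
        "log4j.appender.DRFA.File": "${hadoop.log.dir}/${hadoop.log.file}",
    }

    # Overrides as a daemon started with
    # -Dhadoop.root.logger=INFO,RFA -Dhadoop.log.dir=/var/log/hadoop/hdfs would set them.
    overrides = {"hadoop.root.logger": "INFO,RFA", "hadoop.log.dir": "/var/log/hadoop/hdfs"}

    PLACEHOLDER = re.compile(r"\$\{([^}]+)\}")

    def resolve(key, props, overrides, seen=()):
        """Look up key with system properties winning, then expand ${...} refs."""
        raw = overrides.get(key, props.get(key, ""))
        def repl(match):
            ref = match.group(1)
            if ref in seen:            # guard against cyclic references
                return match.group(0)
            return resolve(ref, props, overrides, seen + (ref,))
        return PLACEHOLDER.sub(repl, raw)

    print(resolve("log4j.appender.DRFA.File", props, overrides))
    # -> /var/log/hadoop/hdfs/hadoop.log
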
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..e67ed9f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,418 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+
+ <!-- file system properties -->
+
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <!-- cluster variant -->
+ <value>/hadoop/hdfs/namenode</value>
+ <description>Determines where on the local filesystem the DFS name node
+ should store the name table. If this is a comma-delimited list
+ of directories then the name table is replicated in all of the
+ directories, for redundancy. </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.support.append</name>
+ <value>true</value>
+ <description>to enable dfs append</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>true</value>
+ <description>Whether to enable WebHDFS feature</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.datanode.failed.volumes.tolerated</name>
+ <value>0</value>
+ <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>/hadoop/hdfs/data</value>
+  <description>Determines where on the local filesystem a DFS data node
+ should store its blocks. If this is a comma-delimited
+ list of directories, then data will be stored in all named
+ directories, typically on different devices.
+ Directories that do not exist are ignored.
+ </description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.hosts.exclude</name>
+ <value>/etc/hadoop/conf/dfs.exclude</value>
+ <description>Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.</description>
+ </property>
+
+ <!--
+ <property>
+ <name>dfs.hosts</name>
+ <value>/etc/hadoop/conf/dfs.include</value>
+ <description>Names a file that contains a list of hosts that are
+ permitted to connect to the namenode. The full pathname of the file
+ must be specified. If the value is empty, all hosts are
+ permitted.</description>
+ </property>
+ -->
+
+ <property>
+ <name>dfs.namenode.checkpoint.dir</name>
+ <value>/hadoop/hdfs/namesecondary</value>
+ <description>Determines where on the local filesystem the DFS secondary
+ name node should store the temporary images to merge.
+ If this is a comma-delimited list of directories then the image is
+ replicated in all of the directories for redundancy.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.checkpoint.edits.dir</name>
+ <value>${dfs.namenode.checkpoint.dir}</value>
+ <description>Determines where on the local filesystem the DFS secondary
+ name node should store the temporary edits to merge.
+    If this is a comma-delimited list of directories then the edits are
+    replicated in all of the directories for redundancy.
+ Default value is same as dfs.namenode.checkpoint.dir
+ </description>
+ </property>
+
+
+ <property>
+ <name>dfs.namenode.checkpoint.period</name>
+ <value>21600</value>
+ <description>The number of seconds between two periodic checkpoints.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.checkpoint.txns</name>
+ <value>1000000</value>
+ <description>The Secondary NameNode or CheckpointNode will create a checkpoint
+ of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
+ regardless of whether 'dfs.namenode.checkpoint.period' has expired.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.replication.max</name>
+ <value>50</value>
+  <description>Maximum block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.replication</name>
+ <value>3</value>
+ <description>Default block replication.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.heartbeat.interval</name>
+ <value>3</value>
+ <description>Determines datanode heartbeat interval in seconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.safemode.threshold-pct</name>
+ <value>1.0f</value>
+ <description>
+ Specifies the percentage of blocks that should satisfy
+ the minimal replication requirement defined by dfs.namenode.replication.min.
+ Values less than or equal to 0 mean not to start in safe mode.
+ Values greater than 1 will make safe mode permanent.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.balance.bandwidthPerSec</name>
+ <value>6250000</value>
+ <description>
+    Specifies the maximum amount of bandwidth that each datanode
+    can utilize for balancing, in terms of
+    the number of bytes per second.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.https.port</name>
+ <value>50470</value>
+ <description>
+ This property is used by HftpFileSystem.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.address</name>
+ <value>0.0.0.0:50010</value>
+ <description>
+ The datanode server address and port for data transfer.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.http.address</name>
+ <value>0.0.0.0:50075</value>
+ <description>
+ The datanode http server address and port.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.https.address</name>
+ <value>0.0.0.0:50475</value>
+ <description>
+ The datanode https server address and port.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.blocksize</name>
+ <value>134217728</value>
+ <description>The default block size for new files.</description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.http-address</name>
+ <value>localhost:50070</value>
+  <description>The address and the base port where the dfs namenode
+    web ui will listen on.</description>
+ <final>true</final>
+ </property>
+
+ <property>
+ <name>dfs.datanode.du.reserved</name>
+ <!-- cluster variant -->
+ <value>1073741824</value>
+  <description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.ipc.address</name>
+ <value>0.0.0.0:8010</value>
+ <description>
+ The datanode ipc server address and port.
+ If the port is 0 then the server will start on a free port.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.blockreport.initialDelay</name>
+ <value>120</value>
+ <description>Delay for first block report in seconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.datanode.max.transfer.threads</name>
+ <value>1024</value>
+ <description>Specifies the maximum number of threads to use for transferring data in and out of the datanode.</description>
+ </property>
+
+ <!-- Permissions configuration -->
+
+ <property>
+ <name>fs.permissions.umask-mode</name>
+ <value>022</value>
+ <description>
+ The octal umask used when creating files and directories.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.permissions.enabled</name>
+ <value>true</value>
+ <description>
+ If "true", enable permission checking in HDFS.
+ If "false", permission checking is turned off,
+ but all other behavior is unchanged.
+ Switching from one parameter value to the other does not change the mode,
+ owner or group of files or directories.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.permissions.superusergroup</name>
+ <value>hdfs</value>
+ <description>The name of the group of super-users.</description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>100</value>
+  <description>The number of server threads for the namenode, raised so that more client connections are allowed.</description>
+ </property>
+
+ <property>
+ <name>dfs.block.access.token.enable</name>
+ <value>true</value>
+ <description>
+ If "true", access tokens are used as capabilities for accessing datanodes.
+ If "false", no access tokens are checked on accessing datanodes.
+ </description>
+ </property>
+
+ <property>
+ <!-- cluster variant -->
+ <name>dfs.namenode.secondary.http-address</name>
+ <value>localhost:50090</value>
+ <description>Address of secondary namenode web server</description>
+ </property>
+
+
+ <property>
+ <name>dfs.namenode.https-address</name>
+ <value>localhost:50470</value>
+ <description>The https address where namenode binds</description>
+
+ </property>
+
+ <property>
+ <name>dfs.datanode.data.dir.perm</name>
+ <value>750</value>
+ <description>The permissions that should be there on dfs.datanode.data.dir
+ directories. The datanode will not come up if the permissions are
+ different on existing dfs.datanode.data.dir directories. If the directories
+ don't exist, they will be created with this permission.</description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.accesstime.precision</name>
+ <value>0</value>
+  <description>The access time for an HDFS file is precise up to this value,
+    in milliseconds. The upstream default is 1 hour; setting a value of 0,
+    as here, disables access times for HDFS.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+  <description>ACL determining who can view the default servlets in HDFS.</description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.avoid.read.stale.datanode</name>
+ <value>true</value>
+ <description>
+ Indicate whether or not to avoid reading from stale datanodes whose
+ heartbeat messages have not been received by the namenode for more than a
+ specified time interval.
+ </description>
+ </property>
+ <property>
+ <name>dfs.namenode.avoid.write.stale.datanode</name>
+ <value>true</value>
+ <description>
+ Indicate whether or not to avoid writing to stale datanodes whose
+ heartbeat messages have not been received by the namenode for more than a
+ specified time interval.
+ </description>
+ </property>
+ <property>
+ <name>dfs.namenode.write.stale.datanode.ratio</name>
+ <value>1.0f</value>
+  <description>When the ratio of stale datanodes to total datanodes exceeds this
+    ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
+ </description>
+ </property>
+ <property>
+ <name>dfs.namenode.stale.datanode.interval</name>
+ <value>30000</value>
+  <description>A datanode is marked stale when the namenode has not received a heartbeat from it within this interval, in milliseconds.</description>
+ </property>
+
+ <property>
+ <name>dfs.journalnode.http-address</name>
+ <value>0.0.0.0:8480</value>
+ <description>The address and port the JournalNode web UI listens on.
+ If the port is 0 then the server will start on a free port. </description>
+ </property>
+
+ <property>
+ <name>dfs.journalnode.edits.dir</name>
+ <value>/grid/0/hdfs/journal</value>
+ <description>The path where the JournalNode daemon will store its local state. </description>
+ </property>
+
+ <!-- HDFS Short-Circuit Local Reads -->
+
+ <property>
+ <name>dfs.client.read.shortcircuit</name>
+ <value>true</value>
+ <description>
+ This configuration parameter turns on short-circuit local reads.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.domain.socket.path</name>
+ <value>/var/lib/hadoop-hdfs/dn_socket</value>
+ <description>
+ This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
+ If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.client.read.shortcircuit.streams.cache.size</name>
+ <value>4096</value>
+ <description>
+ The DFSClient maintains a cache of recently opened file descriptors. This
+ parameter controls the size of that cache. Setting this higher will use
+ more file descriptors, but potentially provide better performance on
+ workloads involving lots of seeks.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.namenode.name.dir.restore</name>
+ <value>true</value>
+ <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
+ When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
+ </property>
+
+ <property>
+ <name>dfs.http.policy</name>
+ <value>HTTP_ONLY</value>
+ <description>
+    Decide if HTTPS(SSL) is supported on HDFS. This configures the HTTP endpoint for HDFS daemons.
+    The following values are supported: HTTP_ONLY (service is provided only on http), HTTPS_ONLY
+    (service is provided only on https), HTTP_AND_HTTPS (service is provided on both http and https).
+ </description>
+ </property>
+
+</configuration>
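
Ambari stack configuration files like the hdfs-site.xml above share a simple schema: a <configuration> root whose supports_final attribute controls whether properties may be marked final, and repeated <property> elements carrying <name>, <value>, <description>, and an optional <final> flag. Below is a minimal Python sketch of reading one of these files into a dictionary, assuming a local copy of the file; load_ambari_config() is a hypothetical helper, not an Ambari API. Note that in a dict a duplicate <name> silently overwrites the earlier entry, which is why duplicated properties in these files are easy to miss:

    import xml.etree.ElementTree as ET

    def load_ambari_config(path):
        """Parse an Ambari stack configuration file into {name: (value, is_final)}.
        Later duplicates of the same property name overwrite earlier ones."""
        root = ET.parse(path).getroot()
        out = {}
        for prop in root.findall("property"):
            name = prop.findtext("name")
            value = prop.findtext("value", default="")
            is_final = prop.findtext("final", default="false").lower() == "true"
            if name:
                out[name] = (value, is_final)
        return out

    conf = load_ambari_config("hdfs-site.xml")
    print(conf["dfs.namenode.name.dir"])   # ('/hadoop/hdfs/namenode', True)
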
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..fdf8011
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metainfo.xml
@@ -0,0 +1,225 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HDFS</name>
+ <displayName>HDFS</displayName>
+ <comment>Apache Hadoop Distributed File System</comment>
+ <version>2.4.1.phd.3.0.0.0</version>
+
+ <components>
+ <component>
+ <name>NAMENODE</name>
+ <displayName>NameNode</displayName>
+ <category>MASTER</category>
+ <cardinality>1-2</cardinality>
+ <commandScript>
+ <script>scripts/namenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <customCommands>
+ <customCommand>
+ <name>DECOMMISSION</name>
+ <commandScript>
+ <script>scripts/namenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </customCommand>
+ <customCommand>
+ <name>REBALANCEHDFS</name>
+ <background>true</background>
+ <commandScript>
+ <script>scripts/namenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ </commandScript>
+ </customCommand>
+ </customCommands>
+ </component>
+
+ <component>
+ <name>DATANODE</name>
+ <displayName>DataNode</displayName>
+ <category>SLAVE</category>
+ <cardinality>1+</cardinality>
+ <commandScript>
+ <script>scripts/datanode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>SECONDARY_NAMENODE</name>
+ <displayName>SNameNode</displayName>
+
+ <cardinality>1</cardinality>
+ <category>MASTER</category>
+ <commandScript>
+ <script>scripts/snamenode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>HDFS_CLIENT</name>
+ <displayName>HDFS Client</displayName>
+ <category>CLIENT</category>
+ <cardinality>1+</cardinality>
+ <commandScript>
+ <script>scripts/hdfs_client.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ <configFiles>
+ <configFile>
+ <type>xml</type>
+ <fileName>hdfs-site.xml</fileName>
+ <dictionaryName>hdfs-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>xml</type>
+ <fileName>core-site.xml</fileName>
+ <dictionaryName>core-site</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>log4j.properties</fileName>
+ <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+ </configFile>
+ <configFile>
+ <type>env</type>
+ <fileName>hadoop-env.sh</fileName>
+ <dictionaryName>hadoop-env</dictionaryName>
+ </configFile>
+ </configFiles>
+ </component>
+
+ <component>
+ <name>JOURNALNODE</name>
+ <displayName>JournalNode</displayName>
+ <category>SLAVE</category>
+ <cardinality>0+</cardinality>
+ <commandScript>
+ <script>scripts/journalnode.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+
+ <component>
+ <name>ZKFC</name>
+ <displayName>ZKFailoverController</displayName>
+ <category>SLAVE</category>
+
+ <cardinality>0+</cardinality>
+ <commandScript>
+ <script>scripts/zkfc_slave.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>600</timeout>
+ </commandScript>
+ </component>
+ </components>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>hadoop</name>
+ </package>
+ <package>
+ <name>hadoop-lzo</name>
+ </package>
+ </packages>
+ </osSpecific>
+
+ <osSpecific>
+ <osFamily>redhat5,redhat6,suse11</osFamily>
+ <packages>
+ <package>
+ <name>snappy</name>
+ </package>
+ <package>
+ <name>snappy-devel</name>
+ </package>
+ <package>
+ <name>lzo</name>
+ </package>
+ <package>
+ <name>hadoop-lzo-native</name>
+ </package>
+ <package>
+ <name>hadoop-libhdfs</name>
+ </package>
+ <package>
+ <name>ambari-log4j</name>
+ </package>
+ </packages>
+ </osSpecific>
+
+ <osSpecific>
+ <osFamily>ubuntu12</osFamily>
+ <packages>
+ <package>
+ <name>libsnappy1</name>
+ </package>
+ <package>
+ <name>libsnappy-dev</name>
+ </package>
+ <package>
+ <name>liblzo2-2</name>
+ </package>
+ <package>
+ <name>hadoop-hdfs</name>
+ </package>
+ <package>
+ <name>libhdfs0</name>
+ </package>
+ <package>
+ <name>libhdfs0-dev</name>
+ </package>
+ </packages>
+ </osSpecific>
+
+ </osSpecifics>
+
+ <commandScript>
+ <script>scripts/service_check.py</script>
+ <scriptType>PYTHON</scriptType>
+ <timeout>300</timeout>
+ </commandScript>
+
+ <requiredServices>
+ <service>ZOOKEEPER</service>
+ </requiredServices>
+
+ <configuration-dependencies>
+ <config-type>core-site</config-type>
+ <config-type>hdfs-site</config-type>
+ <config-type>hadoop-env</config-type>
+ <config-type>hadoop-policy</config-type>
+ <config-type>hdfs-log4j</config-type>
+ </configuration-dependencies>
+ </service>
+ </services>
+</metainfo>
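
The metainfo.xml above follows the schemaVersion 2.0 service layout: a <service> with a list of <components>, each declaring a <category> (MASTER, SLAVE, or CLIENT), a <cardinality>, and a Python <commandScript> that Ambari agents execute for lifecycle commands. A short sketch that walks such a file and summarizes the components, assuming a local metainfo.xml; summarize_metainfo() is a hypothetical helper, not part of Ambari:

    import xml.etree.ElementTree as ET

    def summarize_metainfo(path):
        """List each component of a schemaVersion-2.0 service definition
        with its category, cardinality, and command script."""
        root = ET.parse(path).getroot()
        for comp in root.iter("component"):
            name = comp.findtext("name", "?")
            category = comp.findtext("category", "?")
            cardinality = comp.findtext("cardinality", "?")
            script = comp.findtext("commandScript/script", "?")
            print(f"{name:20} {category:7} {cardinality:4} {script}")

    summarize_metainfo("metainfo.xml")
    # NAMENODE             MASTER  1-2  scripts/namenode.py
    # DATANODE             SLAVE   1+   scripts/datanode.py
    # ...
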