Posted to commits@ambari.apache.org by nc...@apache.org on 2016/12/16 22:02:43 UTC
[35/51] [abbrv] ambari git commit: AMBARI-19220. Fix version of HDFS and YARN used by HDP 3.0 (alejandro)
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-env.xml
deleted file mode 100644
index 24032fa..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-env.xml
+++ /dev/null
@@ -1,421 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
- <property>
- <name>hdfs_log_dir_prefix</name>
- <value>/var/log/hadoop</value>
- <description>Hadoop Log Dir Prefix</description>
- <display-name>Hadoop Log Dir Prefix</display-name>
- <value-attributes>
- <type>directory</type>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hadoop_pid_dir_prefix</name>
- <value>/var/run/hadoop</value>
- <display-name>Hadoop PID Dir Prefix</display-name>
- <description>Hadoop PID Dir Prefix</description>
- <value-attributes>
- <type>directory</type>
- <overridable>false</overridable>
- <editable-only-at-install>true</editable-only-at-install>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hadoop_root_logger</name>
- <value>INFO,RFA</value>
- <display-name>Hadoop Root Logger</display-name>
- <description>Hadoop Root Logger</description>
- <value-attributes>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hadoop_heapsize</name>
- <value>1024</value>
- <description>Hadoop maximum Java heap size</description>
- <display-name>Hadoop maximum Java heap size</display-name>
- <value-attributes>
- <type>int</type>
- <unit>MB</unit>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>namenode_heapsize</name>
- <value>1024</value>
- <description>NameNode Java heap size</description>
- <display-name>NameNode Java heap size</display-name>
- <value-attributes>
- <type>int</type>
- <minimum>0</minimum>
- <maximum>268435456</maximum>
- <unit>MB</unit>
- <increment-step>256</increment-step>
- <overridable>false</overridable>
- </value-attributes>
- <depends-on>
- <property>
- <type>hdfs-site</type>
- <name>dfs.datanode.data.dir</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>namenode_opt_newsize</name>
- <value>200</value>
- <description>Default size of the Java new generation for the NameNode (Java option -XX:NewSize). Note: the value of the namenode_opt_newsize property should be 1/8 of the maximum heap size (-Xmx).</description>
- <display-name>NameNode new generation size</display-name>
- <depends-on>
- <property>
- <type>hadoop-env</type>
- <name>namenode_heapsize</name>
- </property>
- </depends-on>
- <value-attributes>
- <type>int</type>
- <minimum>0</minimum>
- <maximum>16384</maximum>
- <unit>MB</unit>
- <increment-step>256</increment-step>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>namenode_opt_maxnewsize</name>
- <value>200</value>
- <description>NameNode maximum new generation size</description>
- <display-name>NameNode maximum new generation size</display-name>
- <depends-on>
- <property>
- <type>hadoop-env</type>
- <name>namenode_heapsize</name>
- </property>
- </depends-on>
- <value-attributes>
- <type>int</type>
- <minimum>0</minimum>
- <maximum>16384</maximum>
- <unit>MB</unit>
- <increment-step>256</increment-step>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>namenode_opt_permsize</name>
- <value>128</value>
- <description>NameNode permanent generation size</description>
- <display-name>NameNode permanent generation size</display-name>
- <value-attributes>
- <type>int</type>
- <minimum>0</minimum>
- <maximum>2096</maximum>
- <unit>MB</unit>
- <increment-step>128</increment-step>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>namenode_opt_maxpermsize</name>
- <value>256</value>
- <description>NameNode maximum permanent generation size</description>
- <display-name>NameNode maximum permanent generation size</display-name>
- <value-attributes>
- <type>int</type>
- <minimum>0</minimum>
- <maximum>2096</maximum>
- <unit>MB</unit>
- <increment-step>128</increment-step>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dtnode_heapsize</name>
- <value>1024</value>
- <description>DataNode maximum Java heap size</description>
- <display-name>DataNode maximum Java heap size</display-name>
- <value-attributes>
- <type>int</type>
- <minimum>0</minimum>
- <maximum>268435456</maximum>
- <unit>MB</unit>
- <increment-step>128</increment-step>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>proxyuser_group</name>
- <display-name>Proxy User Group</display-name>
- <value>users</value>
- <property-type>GROUP</property-type>
- <description>Proxy user group.</description>
- <value-attributes>
- <type>user</type>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hdfs_user</name>
- <display-name>HDFS User</display-name>
- <value>hdfs</value>
- <property-type>USER</property-type>
- <description>User to run HDFS as</description>
- <value-attributes>
- <type>user</type>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hdfs_tmp_dir</name>
- <value>/tmp</value>
- <description>HDFS tmp Dir</description>
- <display-name>HDFS tmp Dir</display-name>
- <property-type>NOT_MANAGED_HDFS_PATH</property-type>
- <value-attributes>
- <read-only>true</read-only>
- <overridable>false</overridable>
- <visible>false</visible>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hdfs_user_nofile_limit</name>
- <value>128000</value>
- <description>Max open files limit setting for HDFS user.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hdfs_user_nproc_limit</name>
- <value>65536</value>
- <description>Max number of processes limit setting for HDFS user.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>namenode_backup_dir</name>
- <description>Local directory for storing backup copy of NameNode images during upgrade</description>
- <value>/tmp/upgrades</value>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>hdfs_user_keytab</name>
- <description>HDFS keytab path</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>hdfs_principal_name</name>
- <description>HDFS principal name</description>
- <on-ambari-upgrade add="true"/>
- </property>
-
- <!-- These configs were inherited from HDP 2.2 -->
- <property>
- <name>keyserver_host</name>
- <value> </value>
- <display-name>Key Server Host</display-name>
- <description>Hostnames where Key Management Server is installed</description>
- <value-attributes>
- <type>string</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>keyserver_port</name>
- <value/>
- <display-name>Key Server Port</display-name>
- <description>Port number where Key Management Server is available</description>
- <value-attributes>
- <type>int</type>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
-
- <!-- These configs were inherited from HDP 2.3 -->
- <!-- hadoop-env.sh -->
- <property>
- <name>content</name>
- <display-name>hadoop-env template</display-name>
- <description>This is the jinja template for hadoop-env.sh file</description>
- <value>
- # Set Hadoop-specific environment variables here.
-
- # The only required environment variable is JAVA_HOME. All others are
- # optional. When running a distributed configuration it is best to
- # set JAVA_HOME in this file, so that it is correctly defined on
- # remote nodes.
-
- # The java implementation to use. Required.
- export JAVA_HOME={{java_home}}
- export HADOOP_HOME_WARN_SUPPRESS=1
-
- # Hadoop home directory
- export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
-
- # Hadoop Configuration Directory
- #TODO: if this env var is already set it can cause problems
- export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
-
-
- # Path to jsvc required by secure datanode
- export JSVC_HOME={{jsvc_path}}
-
-
- # The maximum amount of heap to use, in MB. Default is 1000.
- if [[ ("$SERVICE" = "hiveserver2") || ("$SERVICE" = "metastore") || ( "$SERVICE" = "cli") ]]; then
- if [ "$HADOOP_HEAPSIZE" = "" ]; then
- export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
- fi
- else
- export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
- fi
-
-
- export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
-
- # Extra Java runtime options. Empty by default.
- export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
- # Command specific options appended to HADOOP_OPTS when specified
-
- {% if java_version < 8 %}
- export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
- export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
- # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
- export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
- {% else %}
- export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
- export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
- # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
- export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
- {% endif %}
- HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
- HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
- HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
- HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
-
- # On secure datanodes, user to run the datanode as after dropping privileges
- export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
-
- # Extra ssh options. Empty by default.
- export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
- # Where log files are stored. $HADOOP_HOME/logs by default.
- export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
-
- # History server logs
- export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
-
- # Where log files are stored in the secure data environment.
- export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
- # File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
- # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
- # host:path where hadoop code should be rsync'd from. Unset by default.
- # export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
- # Seconds to sleep between slave commands. Unset by default. This
- # can be useful in large clusters, where, e.g., slave rsyncs can
- # otherwise arrive faster than the master can service them.
- # export HADOOP_SLAVE_SLEEP=0.1
-
- # The directory where pid files are stored. /tmp by default.
- export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
- export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
- # History server pid
- export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
-
- YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Drm.audit.logger=INFO,RMAUDIT"
-
- # A string representing this instance of hadoop. $USER by default.
- export HADOOP_IDENT_STRING=$USER
-
- # The scheduling priority for daemon processes. See 'man nice'.
-
- # export HADOOP_NICENESS=10
-
- # Add database libraries
- JAVA_JDBC_LIBS=""
- if [ -d "/usr/share/java" ]; then
- for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2>/dev/null`
- do
- JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
- done
- fi
-
- # Add libraries required by nodemanager
- MAPREDUCE_LIBS={{mapreduce_libs_path}}
-
- # Add libraries to the hadoop classpath - some may not need a colon as they already include it
- export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
-
- if [ -d "/usr/lib/tez" ]; then
- export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
- fi
-
- # Setting path to hdfs command line
- export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-
- #Mostly required for hadoop 2.0
- export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
-
- {% if is_datanode_max_locked_memory_set %}
- # Workaround for a temporary bug where the ulimit from conf files is not picked up without a full re-login.
- # Only makes sense when running the DN as root
- if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
- ulimit -l {{datanode_max_locked_memory}}
- fi
- {% endif %}
- </value>
- <value-attributes>
- <type>content</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>nfsgateway_heapsize</name>
- <display-name>NFSGateway maximum Java heap size</display-name>
- <value>1024</value>
- <description>Maximum Java heap size for NFSGateway (Java option -Xmx)</description>
- <value-attributes>
- <type>int</type>
- <unit>MB</unit>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
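
As an aside on the namenode_opt_newsize description above: it encodes a
simple sizing rule (new generation = 1/8 of the -Xmx heap), bounded by the
property's value-attributes. A minimal Python sketch of that arithmetic,
illustrative only; the function name is made up and this is not Ambari's
stack-advisor code:

def recommend_newsize_mb(namenode_heapsize_mb, minimum=0, maximum=16384):
    # 1/8 of the NameNode heap, clamped to the value-attributes bounds.
    # (The UI slider additionally snaps to the 256 MB increment-step.)
    return max(minimum, min(maximum, namenode_heapsize_mb // 8))

print(recommend_newsize_mb(1024))    # -> 128
print(recommend_newsize_mb(4096))    # -> 512
print(recommend_newsize_mb(200000))  # -> 16384 (clamped to maximum)
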
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-metrics2.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-metrics2.properties.xml
deleted file mode 100644
index 6b45e84..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-metrics2.properties.xml
+++ /dev/null
@@ -1,125 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
- <!-- hadoop-metrics2.properties -->
- <property>
- <name>content</name>
- <display-name>hadoop-metrics2.properties template</display-name>
- <description>This is the jinja template for hadoop-metrics2.properties file</description>
- <value>
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
-supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}
-
-{% if has_metric_collector %}
-
-*.period={{metrics_collection_period}}
-*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-*.sink.timeline.period={{metrics_collection_period}}
-*.sink.timeline.sendInterval={{metrics_report_interval}}000
-*.sink.timeline.slave.host.name={{hostname}}
-*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
-*.sink.timeline.protocol={{metric_collector_protocol}}
-*.sink.timeline.port={{metric_collector_port}}
-
-# HTTPS properties
-*.sink.timeline.truststore.path = {{metric_truststore_path}}
-*.sink.timeline.truststore.type = {{metric_truststore_type}}
-*.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-maptask.sink.timeline.collector.hosts={{ams_collector_hosts}}
-reducetask.sink.timeline.collector.hosts={{ams_collector_hosts}}
-applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-
-resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
-
-{% if is_nn_client_port_configured %}
-# Namenode rpc ports customization
-namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
-{% endif %}
-{% if is_nn_dn_port_configured %}
-namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
-{% endif %}
-{% if is_nn_healthcheck_port_configured %}
-namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
-{% endif %}
-
-{% endif %}
- </value>
- <value-attributes>
- <type>content</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
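
For readers unfamiliar with how a template like the one above becomes a
concrete hadoop-metrics2.properties file: blocks such as
{% if has_metric_collector %} are resolved at deploy time. A minimal sketch
using plain jinja2 follows; Ambari's resource_management library does the
real rendering, so this is illustrative only, with assumed sample values:

from jinja2 import Template

# A small excerpt of the template above, rendered with sample values.
excerpt = """\
{% if has_metric_collector %}
*.period={{metrics_collection_period}}
*.sink.timeline.sendInterval={{metrics_report_interval}}000
{% endif %}"""

print(Template(excerpt, trim_blocks=True).render(
    has_metric_collector=True,
    metrics_collection_period=10,  # assumed sample value
    metrics_report_interval=60,    # assumed sample value
))
# -> *.period=10
#    *.sink.timeline.sendInterval=60000
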
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-policy.xml
deleted file mode 100644
index 8e9486d..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration supports_final="true">
- <property>
- <name>security.client.protocol.acl</name>
- <value>*</value>
- <description>ACL for ClientProtocol, which is used by user code
- via the DistributedFileSystem.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>security.client.datanode.protocol.acl</name>
- <value>*</value>
- <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
- for block recovery.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>security.datanode.protocol.acl</name>
- <value>*</value>
- <description>ACL for DatanodeProtocol, which is used by datanodes to
- communicate with the namenode.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>security.inter.datanode.protocol.acl</name>
- <value>*</value>
- <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
- for updating generation timestamp.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>security.namenode.protocol.acl</name>
- <value>*</value>
- <description>ACL for NamenodeProtocol, the protocol used by the secondary
- namenode to communicate with the namenode.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>security.inter.tracker.protocol.acl</name>
- <value>*</value>
- <description>ACL for InterTrackerProtocol, used by the tasktrackers to
- communicate with the jobtracker.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>security.job.client.protocol.acl</name>
- <value>*</value>
- <description>ACL for JobSubmissionProtocol, used by job clients to
- communicate with the jobtracker for job submission, querying job status etc.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>security.job.task.protocol.acl</name>
- <value>*</value>
- <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
- tasks to communicate with the parent tasktracker.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>security.admin.operations.protocol.acl</name>
- <value>hadoop</value>
- <description>ACL for AdminOperationsProtocol. Used for admin commands.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>security.refresh.usertogroups.mappings.protocol.acl</name>
- <value>hadoop</value>
- <description>ACL for RefreshUserMappingsProtocol. Used to refresh
- users mappings. The ACL is a comma-separated list of user and
- group names. The user and group list is separated by a blank,
- e.g. "alice,bob users,wheel". A special value of "*" means all
- users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>security.refresh.policy.protocol.acl</name>
- <value>hadoop</value>
- <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
- dfsadmin and mradmin commands to refresh the security policy in-effect.
- The ACL is a comma-separated list of user and group names. The user and
- group list is separated by a blank, e.g. "alice,bob users,wheel".
- A special value of "*" means all users are allowed.</description>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
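
Every description in hadoop-policy.xml above repeats the same ACL format:
comma-separated users, then a single blank, then comma-separated groups,
with "*" meaning all users. A minimal parser for that format, purely
illustrative (Hadoop's real implementation lives in
org.apache.hadoop.security.authorize.AccessControlList):

def parse_acl(value):
    value = value.strip()
    if value == "*":
        return {"users": None, "groups": None}  # everyone is allowed
    users_part, _, groups_part = value.partition(" ")
    return {
        "users": [u for u in users_part.split(",") if u],
        "groups": [g for g in groups_part.split(",") if g],
    }

print(parse_acl("*"))
print(parse_acl("alice,bob users,wheel"))
# -> {'users': ['alice', 'bob'], 'groups': ['users', 'wheel']}
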
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-log4j.xml
deleted file mode 100644
index 37b339e..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-log4j.xml
+++ /dev/null
@@ -1,226 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
- <!-- These configs were inherited from HDP 2.2 -->
- <property>
- <name>content</name>
- <display-name>hdfs-log4j template</display-name>
- <description>Custom log4j.properties</description>
- <value>
- #
- # Licensed to the Apache Software Foundation (ASF) under one
- # or more contributor license agreements. See the NOTICE file
- # distributed with this work for additional information
- # regarding copyright ownership. The ASF licenses this file
- # to you under the Apache License, Version 2.0 (the
- # "License"); you may not use this file except in compliance
- # with the License. You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing,
- # software distributed under the License is distributed on an
- # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- # KIND, either express or implied. See the License for the
- # specific language governing permissions and limitations
- # under the License.
- #
-
-
- # Define some default values that can be overridden by system properties
- # To change daemon root logger use hadoop_root_logger in hadoop-env
- hadoop.root.logger=INFO,console
- hadoop.log.dir=.
- hadoop.log.file=hadoop.log
-
-
- # Define the root logger to the system property "hadoop.root.logger".
- log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
- # Logging Threshold
- log4j.threshold=ALL
-
- #
- # Daily Rolling File Appender
- #
-
- log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
- # Rollover at midnight
- log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
- # 30-day backup
- #log4j.appender.DRFA.MaxBackupIndex=30
- log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
- # Pattern format: Date LogLevel LoggerName LogMessage
- log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
- # Debugging Pattern format
- #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
- #
- # console
- # Add "console" to rootlogger above if you want to use this
- #
-
- log4j.appender.console=org.apache.log4j.ConsoleAppender
- log4j.appender.console.target=System.err
- log4j.appender.console.layout=org.apache.log4j.PatternLayout
- log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
- #
- # TaskLog Appender
- #
-
- #Default values
- hadoop.tasklog.taskid=null
- hadoop.tasklog.iscleanup=false
- hadoop.tasklog.noKeepSplits=4
- hadoop.tasklog.totalLogFileSize=100
- hadoop.tasklog.purgeLogSplits=true
- hadoop.tasklog.logsRetainHours=12
-
- log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
- log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
- log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
- log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
- log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
- log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
- #
- #Security audit appender
- #
- hadoop.security.logger=INFO,console
- hadoop.security.log.maxfilesize=256MB
- hadoop.security.log.maxbackupindex=20
- log4j.category.SecurityLogger=${hadoop.security.logger}
- hadoop.security.log.file=SecurityAuth.audit
- log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
- log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
- log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
- log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
- log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
- log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
- log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
- log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
- log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
- log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
- #
- # hdfs audit logging
- #
- hdfs.audit.logger=INFO,console
- log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
- log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
- log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
- log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
- log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
- log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
- #
- # NameNode metrics logging.
- # The default is to retain two namenode-metrics.log files up to 64MB each.
- #
- namenode.metrics.logger=INFO,NullAppender
- log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
- log4j.additivity.NameNodeMetricsLog=false
- log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
- log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
- log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
- log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
- log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
- log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
-
- #
- # mapred audit logging
- #
- mapred.audit.logger=INFO,console
- log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
- log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
- log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
- log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
- log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
- log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
- log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
- #
- # Rolling File Appender
- #
-
- log4j.appender.RFA=org.apache.log4j.RollingFileAppender
- log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
- # Logfile size and number of backups
- log4j.appender.RFA.MaxFileSize=256MB
- log4j.appender.RFA.MaxBackupIndex=10
-
- log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
- log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
- # Debugging Pattern format
- #log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
- # Custom Logging levels
-
- hadoop.metrics.log.level=INFO
- #log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
- #log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
- #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
- log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
- # Jets3t library
- log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
- #
- # Null Appender
- # Trap security logger on the hadoop client side
- #
- log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
- #
- # Event Counter Appender
- # Sends counts of logging messages at different severity levels to Hadoop Metrics.
- #
- log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
- # Removes "deprecated" messages
- log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
-
- #
- # HDFS block state change log from block manager
- #
- # Uncomment the following to suppress normal block state change
- # messages from BlockManager in NameNode.
- #log4j.logger.BlockStateChange=WARN
- </value>
- <value-attributes>
- <type>content</type>
- <show-property-name>false</show-property-name>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
\ No newline at end of file
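
One detail worth calling out in the log4j template above: values such as
${hadoop.root.logger} are log4j 1.x property substitutions, and the
defaults defined near the top of the file are overridden per daemon via
-Dhadoop.root.logger=... (which is what the HADOOP_*_OPTS lines in
hadoop-env arrange). A tiny Python sketch of that resolution order,
illustrative only:

import re

file_defaults = {"hadoop.root.logger": "INFO,console"}
jvm_overrides = {"hadoop.root.logger": "INFO,RFA"}  # e.g. from a -D flag

def resolve(value, props):
    # Expand ${name} references against the merged property map.
    return re.sub(r"\$\{([^}]+)\}", lambda m: props.get(m.group(1), ""), value)

props = {**file_defaults, **jvm_overrides}  # JVM overrides win
print(resolve("${hadoop.root.logger}, EventCounter", props))
# -> INFO,RFA, EventCounter
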
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-logsearch-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-logsearch-conf.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-logsearch-conf.xml
deleted file mode 100644
index d85a028..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-logsearch-conf.xml
+++ /dev/null
@@ -1,248 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
- <property>
- <name>service_name</name>
- <display-name>Service name</display-name>
- <description>Service name for Logsearch Portal (label)</description>
- <value>HDFS</value>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>component_mappings</name>
- <display-name>Component mapping</display-name>
- <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
- <value>NAMENODE:hdfs_namenode;DATANODE:hdfs_datanode;SECONDARY_NAMENODE:hdfs_secondarynamenode;JOURNALNODE:hdfs_journalnode;ZKFC:hdfs_zkfc;NFS_GATEWAY:hdfs_nfs3</value>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>content</name>
- <display-name>Logfeeder Config</display-name>
- <description>Metadata jinja template for Logfeeder which contains grok patterns for reading service specific logs.</description>
- <value>
-{
- "input":[
- {
- "type":"hdfs_datanode",
- "rowtype":"service",
- "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-datanode-*.log"
- },
- {
- "type":"hdfs_namenode",
- "rowtype":"service",
- "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-namenode-*.log"
- },
- {
- "type":"hdfs_journalnode",
- "rowtype":"service",
- "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-journalnode-*.log"
- },
- {
- "type":"hdfs_secondarynamenode",
- "rowtype":"service",
- "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-secondarynamenode-*.log"
- },
- {
- "type":"hdfs_zkfc",
- "rowtype":"service",
- "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-zkfc-*.log"
- },
- {
- "type":"hdfs_nfs3",
- "rowtype":"service",
- "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-nfs3-*.log"
- },
- {
- "type":"hdfs_audit",
- "rowtype":"audit",
- "is_enabled":"true",
- "add_fields":{
- "logType":"HDFSAudit",
- "enforcer":"hadoop-acl",
- "repoType":"1",
- "repo":"hdfs"
- },
- "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hdfs-audit.log"
- }
- ],
- "filter":[
- {
- "filter":"grok",
- "conditions":{
- "fields":{
- "type":[
- "hdfs_datanode",
- "hdfs_journalnode",
- "hdfs_secondarynamenode",
- "hdfs_namenode",
- "hdfs_zkfc",
- "hdfs_nfs3"
- ]
- }
- },
- "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
- "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
- "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
- "post_map_values":{
- "logtime":{
- "map_date":{
- "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
- }
- }
- }
- },
- {
- "filter":"grok",
- "conditions":{
- "fields":{
- "type":[
- "hdfs_audit"
- ]
- }
- },
- "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
- "multiline_pattern":"^(%{TIMESTAMP_ISO8601:evtTime})",
- "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:evtTime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
- "post_map_values":{
- "evtTime":{
- "map_date":{
- "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
- }
- }
- }
- },
- {
- "filter":"keyvalue",
- "sort_order":1,
- "conditions":{
- "fields":{
- "type":[
- "hdfs_audit"
- ]
- }
- },
- "source_field":"log_message",
- "value_split":"=",
- "field_split":"\t",
- "post_map_values":{
- "src":{
- "map_fieldname":{
- "new_fieldname":"resource"
- }
- },
- "ip":{
- "map_fieldname":{
- "new_fieldname":"cliIP"
- }
- },
- "allowed":[
- {
- "map_fieldvalue":{
- "pre_value":"true",
- "post_value":"1"
- }
- },
- {
- "map_fieldvalue":{
- "pre_value":"false",
- "post_value":"0"
- }
- },
- {
- "map_fieldname":{
- "new_fieldname":"result"
- }
- }
- ],
- "cmd":{
- "map_fieldname":{
- "new_fieldname":"action"
- }
- },
- "proto":{
- "map_fieldname":{
- "new_fieldname":"cliType"
- }
- },
- "callerContext":{
- "map_fieldname":{
- "new_fieldname":"req_caller_id"
- }
- }
- }
- },
- {
- "filter":"grok",
- "sort_order":2,
- "source_field":"ugi",
- "remove_source_field":"false",
- "conditions":{
- "fields":{
- "type":[
- "hdfs_audit"
- ]
- }
- },
- "message_pattern":"%{USERNAME:p_user}.+auth:%{USERNAME:p_authType}.+via %{USERNAME:k_user}.+auth:%{USERNAME:k_authType}|%{USERNAME:user}.+auth:%{USERNAME:authType}|%{USERNAME:x_user}",
- "post_map_values":{
- "user":{
- "map_fieldname":{
- "new_fieldname":"reqUser"
- }
- },
- "x_user":{
- "map_fieldname":{
- "new_fieldname":"reqUser"
- }
- },
- "p_user":{
- "map_fieldname":{
- "new_fieldname":"reqUser"
- }
- },
- "k_user":{
- "map_fieldname":{
- "new_fieldname":"proxyUsers"
- }
- },
- "p_authType":{
- "map_fieldname":{
- "new_fieldname":"authType"
- }
- },
- "k_authType":{
- "map_fieldname":{
- "new_fieldname":"proxyAuthType"
- }
- }
- }
- }
- ]
- }
- </value>
- <value-attributes>
- <type>content</type>
- <show-property-name>false</show-property-name>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
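
The component_mappings format above (COMPONENT1:logid1,logid2;COMPONENT2:logid3)
is easy to get wrong by hand, so a minimal parser is sketched below to
illustrate it; this is not Logsearch's own code:

def parse_component_mappings(value):
    mappings = {}
    for entry in value.split(";"):
        if not entry:
            continue
        component, _, logids = entry.partition(":")
        mappings[component] = logids.split(",")
    return mappings

value = ("NAMENODE:hdfs_namenode;DATANODE:hdfs_datanode;"
         "SECONDARY_NAMENODE:hdfs_secondarynamenode")
print(parse_component_mappings(value))
# -> {'NAMENODE': ['hdfs_namenode'], 'DATANODE': ['hdfs_datanode'],
#     'SECONDARY_NAMENODE': ['hdfs_secondarynamenode']}
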
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-site.xml
deleted file mode 100644
index 689b6d08..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,632 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration supports_final="true">
- <!-- file system properties -->
- <property>
- <name>dfs.namenode.name.dir</name>
- <!-- cluster variant -->
- <value>/hadoop/hdfs/namenode</value>
- <display-name>NameNode directories</display-name>
- <description>Determines where on the local filesystem the DFS name node
- should store the name table. If this is a comma-delimited list
- of directories then the name table is replicated in all of the
- directories, for redundancy. </description>
- <final>true</final>
- <value-attributes>
- <type>directories</type>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.support.append</name>
- <value>true</value>
- <description>to enable dfs append</description>
- <final>true</final>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.webhdfs.enabled</name>
- <value>true</value>
- <display-name>WebHDFS enabled</display-name>
- <description>Whether to enable WebHDFS feature</description>
- <final>true</final>
- <value-attributes>
- <type>boolean</type>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.datanode.failed.volumes.tolerated</name>
- <value>0</value>
- <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
- <final>true</final>
- <display-name>DataNode failed disk tolerance</display-name>
- <value-attributes>
- <type>int</type>
- <minimum>0</minimum>
- <maximum>2</maximum>
- <increment-step>1</increment-step>
- </value-attributes>
- <depends-on>
- <property>
- <type>hdfs-site</type>
- <name>dfs.datanode.data.dir</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.datanode.data.dir</name>
- <value>/hadoop/hdfs/data</value>
- <display-name>DataNode directories</display-name>
- <description>Determines where on the local filesystem a DFS data node
- should store its blocks. If this is a comma-delimited
- list of directories, then data will be stored in all named
- directories, typically on different devices.
- Directories that do not exist are ignored.
- </description>
- <final>true</final>
- <value-attributes>
- <type>directories</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.hosts.exclude</name>
- <value>/etc/hadoop/conf/dfs.exclude</value>
- <description>Names a file that contains a list of hosts that are
- not permitted to connect to the namenode. The full pathname of the
- file must be specified. If the value is empty, no hosts are
- excluded.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <!--
- <property>
- <name>dfs.hosts</name>
- <value>/etc/hadoop/conf/dfs.include</value>
- <description>Names a file that contains a list of hosts that are
- permitted to connect to the namenode. The full pathname of the file
- must be specified. If the value is empty, all hosts are
- permitted.</description>
- </property>
- -->
- <property>
- <name>dfs.namenode.checkpoint.dir</name>
- <value>/hadoop/hdfs/namesecondary</value>
- <display-name>SecondaryNameNode Checkpoint directories</display-name>
- <description>Determines where on the local filesystem the DFS secondary
- name node should store the temporary images to merge.
- If this is a comma-delimited list of directories then the image is
- replicated in all of the directories for redundancy.
- </description>
- <value-attributes>
- <type>directories</type>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.checkpoint.edits.dir</name>
- <value>${dfs.namenode.checkpoint.dir}</value>
- <description>Determines where on the local filesystem the DFS secondary
- name node should store the temporary edits to merge.
- If this is a comma-delimited list of directories then the edits are
- replicated in all of the directories for redundancy.
- Default value is same as dfs.namenode.checkpoint.dir
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.checkpoint.period</name>
- <value>21600</value>
- <display-name>HDFS Maximum Checkpoint Delay</display-name>
- <description>The number of seconds between two periodic checkpoints.</description>
- <value-attributes>
- <type>int</type>
- <unit>seconds</unit>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.checkpoint.txns</name>
- <value>1000000</value>
- <description>The Secondary NameNode or CheckpointNode will create a checkpoint
- of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
- regardless of whether 'dfs.namenode.checkpoint.period' has expired.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.replication.max</name>
- <value>50</value>
- <description>Maximal block replication.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.replication</name>
- <value>3</value>
- <display-name>Block replication</display-name>
- <description>Default block replication.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.heartbeat.interval</name>
- <value>3</value>
- <description>Determines datanode heartbeat interval in seconds.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.safemode.threshold-pct</name>
- <value>0.999</value>
- <description>
- Specifies the percentage of blocks that should satisfy
- the minimal replication requirement defined by dfs.namenode.replication.min.
- Values less than or equal to 0 mean not to start in safe mode.
- Values greater than 1 will make safe mode permanent.
- </description>
- <display-name>Minimum replicated blocks %</display-name>
- <value-attributes>
- <type>float</type>
- <minimum>0.990</minimum>
- <maximum>1.000</maximum>
- <increment-step>0.001</increment-step>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.datanode.balance.bandwidthPerSec</name>
- <value>6250000</value>
- <description>
- Specifies the maximum amount of bandwidth that each datanode
- can utilize for the balancing purpose in terms of
- the number of bytes per second.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.https.port</name>
- <value>50470</value>
- <description>
- This property is used by HftpFileSystem.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.datanode.address</name>
- <value>0.0.0.0:50010</value>
- <description>
- The datanode server address and port for data transfer.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.datanode.http.address</name>
- <value>0.0.0.0:50075</value>
- <description>
- The datanode http server address and port.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.datanode.https.address</name>
- <value>0.0.0.0:50475</value>
- <description>
- The datanode https server address and port.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.blocksize</name>
- <value>134217728</value>
- <description>The default block size for new files.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.http-address</name>
- <value>localhost:50070</value>
- <description>The address and the base port where the dfs namenode
- web ui will listen on.</description>
- <final>true</final>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>dfs.namenode.rpc-address</name>
- <value>localhost:8020</value>
- <description>RPC address that handles all client requests.</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>dfs.datanode.du.reserved</name>
- <!-- cluster variant -->
- <value>1073741824</value>
- <display-name>Reserved space for HDFS</display-name>
- <description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
- </description>
- <value-attributes>
- <type>int</type>
- <unit>bytes</unit>
- </value-attributes>
- <depends-on>
- <property>
- <type>hdfs-site</type>
- <name>dfs.datanode.data.dir</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.datanode.ipc.address</name>
- <value>0.0.0.0:8010</value>
- <description>
- The datanode ipc server address and port.
- If the port is 0 then the server will start on a free port.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.blockreport.initialDelay</name>
- <value>120</value>
- <description>Delay for first block report in seconds.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.datanode.max.transfer.threads</name>
- <value>1024</value>
- <description>Specifies the maximum number of threads to use for transferring data in and out of the datanode.</description>
- <display-name>DataNode max data transfer threads</display-name>
- <value-attributes>
- <type>int</type>
- <minimum>0</minimum>
- <maximum>48000</maximum>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <!-- Permissions configuration -->
- <property>
- <name>fs.permissions.umask-mode</name>
- <value>022</value>
- <description>
- The octal umask used when creating files and directories.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
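- <!-- Worked example for umask 022: files are created with 0666 & ~022 = 0644 and
- directories with 0777 & ~022 = 0755, so group and others lose write access by default. -->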
- <property>
- <name>dfs.permissions.enabled</name>
- <value>true</value>
- <description>
- If "true", enable permission checking in HDFS.
- If "false", permission checking is turned off,
- but all other behavior is unchanged.
- Switching from one parameter value to the other does not change the mode,
- owner or group of files or directories.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.permissions.superusergroup</name>
- <value>hdfs</value>
- <description>The name of the group of super-users.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.handler.count</name>
- <value>100</value>
- <description>The number of NameNode server threads that process client RPC requests; increase to allow more concurrent client connections</description>
- <display-name>NameNode Server threads</display-name>
- <value-attributes>
- <type>int</type>
- <minimum>1</minimum>
- <maximum>200</maximum>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.block.access.token.enable</name>
- <value>true</value>
- <description>
- If "true", access tokens are used as capabilities for accessing datanodes.
- If "false", no access tokens are checked on accessing datanodes.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <!-- cluster variant -->
- <name>dfs.namenode.secondary.http-address</name>
- <value>localhost:50090</value>
- <description>Address of secondary namenode web server</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>dfs.namenode.https-address</name>
- <value>localhost:50470</value>
- <description>The HTTPS address on which the NameNode listens</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>dfs.datanode.data.dir.perm</name>
- <value>750</value>
- <display-name>DataNode directories permission</display-name>
- <description>The permissions that should be set on dfs.datanode.data.dir
- directories. The DataNode will not come up if the permissions are
- different on existing dfs.datanode.data.dir directories. If the directories
- do not exist, they will be created with this permission.</description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.accesstime.precision</name>
- <value>0</value>
- <display-name>Access time precision</display-name>
- <description>The access time for an HDFS file is precise up to this value.
- The upstream default is 1 hour. Setting a value of 0 disables
- access times for HDFS.
- </description>
- <value-attributes>
- <type>int</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL that determines who can view the default servlets in HDFS</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.avoid.read.stale.datanode</name>
- <value>true</value>
- <description>
- Indicates whether to avoid reading from stale datanodes whose
- heartbeat messages have not been received by the namenode for more than a
- specified time interval.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.avoid.write.stale.datanode</name>
- <value>true</value>
- <description>
- Indicates whether to avoid writing to stale datanodes whose
- heartbeat messages have not been received by the namenode for more than a
- specified time interval.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.write.stale.datanode.ratio</name>
- <value>1.0f</value>
- <description>When the ratio of stale datanodes to total datanodes exceeds this
- ratio, stop avoiding writes to stale nodes so as to prevent hotspots.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.stale.datanode.interval</name>
- <value>30000</value>
- <description>A DataNode is considered stale if no heartbeat has been received from it within this interval, in milliseconds</description>
- <on-ambari-upgrade add="true"/>
- </property>
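- <!-- Taken together: a DataNode that has sent no heartbeat for 30,000 ms (above) is marked
- stale; reads then prefer non-stale replicas, and writes avoid stale nodes until the stale
- ratio reaches dfs.namenode.write.stale.datanode.ratio (1.0f above), at which point write
- avoidance stops to prevent hotspots. -->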
- <property>
- <name>dfs.journalnode.http-address</name>
- <value>0.0.0.0:8480</value>
- <description>The address and port the JournalNode web UI listens on.
- If the port is 0 then the server will start on a free port. </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.journalnode.https-address</name>
- <value>0.0.0.0:8481</value>
- <description>The address and port the JournalNode HTTPS server listens on.
- If the port is 0 then the server will start on a free port. </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.journalnode.edits.dir</name>
- <value>/hadoop/hdfs/journalnode</value>
- <description>The path where the JournalNode daemon will store its local state. </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <!-- HDFS Short-Circuit Local Reads -->
- <property>
- <name>dfs.client.read.shortcircuit</name>
- <value>true</value>
- <display-name>HDFS Short-circuit read</display-name>
- <description>
- This configuration parameter turns on short-circuit local reads.
- </description>
- <value-attributes>
- <type>boolean</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.domain.socket.path</name>
- <value>/var/lib/hadoop-hdfs/dn_socket</value>
- <description>
- This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
- If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
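- <!-- Short-circuit reads only take effect when dfs.client.read.shortcircuit is true, this
- socket path is usable by both the DataNode and the local client, and the native libhadoop
- library is available to the client. -->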
- <property>
- <name>dfs.client.read.shortcircuit.streams.cache.size</name>
- <value>4096</value>
- <description>
- The DFSClient maintains a cache of recently opened file descriptors. This
- parameter controls the size of that cache. Setting this higher will use
- more file descriptors, but potentially provide better performance on
- workloads involving lots of seeks.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.name.dir.restore</name>
- <value>true</value>
- <description>Set to true to enable the NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
- When enabled, recovery of any failed directory is attempted during checkpoint.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.http.policy</name>
- <value>HTTP_ONLY</value>
- <description>
- Decides whether HTTPS (SSL) is supported on HDFS. This configures the HTTP endpoint for HDFS daemons.
- The following values are supported: HTTP_ONLY (service is provided only on HTTP), HTTPS_ONLY
- (service is provided only on HTTPS), and HTTP_AND_HTTPS (service is provided on both HTTP and HTTPS).
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
-
- <!-- These configs were inherited from HDP 2.1 -->
- <property>
- <name>dfs.namenode.audit.log.async</name>
- <value>true</value>
- <description>Whether to enable asynchronous audit logging</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.fslock.fair</name>
- <value>false</value>
- <description>Whether the NameNode's fsLock uses fair ordering</description>
- <on-ambari-upgrade add="true"/>
- </property>
-
- <!-- These configs were inherited from HDP 2.2 -->
- <property>
- <name>dfs.namenode.startup.delay.block.deletion.sec</name>
- <value>3600</value>
- <description>
- The delay, in seconds, for which block deletion is paused after
- NameNode startup. A value of 0 disables the delay.
- When a directory containing a large number of directories and files is
- deleted, a one-hour delay is suggested to give the administrator enough
- time to notice the large number of pending deletion blocks and take
- corrective action.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.client.retry.policy.enabled</name>
- <value>false</value>
- <description>Enables HDFS client retry in the event of a NameNode failure.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.content-summary.limit</name>
- <value>5000</value>
- <description>The maximum number of content summary counts allowed in one locking period; 0 or a negative number means no limit.</description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.encryption.key.provider.uri</name>
- <description>
- The KeyProvider to use when interacting with encryption keys used
- when reading and writing to an encryption zone.
- </description>
- <value/>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <depends-on>
- <property>
- <type>hadoop-env</type>
- <name>keyserver_host</name>
- </property>
- <property>
- <type>hadoop-env</type>
- <name>keyserver_port</name>
- </property>
- <property>
- <type>kms-env</type>
- <name>kms_port</name>
- </property>
- <property>
- <type>ranger-kms-site</type>
- <name>ranger.service.https.attrib.ssl.enabled</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="false"/>
- </property>
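- <!-- When a KMS is configured, the value takes the form kms://http@<kms-host>:<port>/kms
- (kms://https@... when SSL is enabled); the host and port here are placeholders, not
- actual values. -->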
-
- <!-- These configs were inherited from HDP 2.3 -->
- <property>
- <name>nfs.file.dump.dir</name>
- <value>/tmp/.hdfs-nfs</value>
- <display-name>NFSGateway dump directory</display-name>
- <description>
- This directory is used to temporarily save out-of-order writes before
- writing to HDFS. For each file, the out-of-order writes are dumped after
- they accumulate to exceed a certain threshold (e.g., 1 MB) in memory.
- Make sure the directory has enough space.
- </description>
- <value-attributes>
- <type>directory</type>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>nfs.exports.allowed.hosts</name>
- <value>* rw</value>
- <description>
- By default, the export can be mounted by any client. To better control access,
- users can update this property. The value string contains a machine name and access privilege,
- separated by whitespace characters. The machine name format can be a single host, wildcards, or IPv4
- networks. The access privilege uses rw or ro to specify read-write or read-only access of the machines
- to the exports. If the access privilege is not provided, the default is read-only. Entries are separated
- by ";". For example: "192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;".
- </description>
- <display-name>Allowed hosts</display-name>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.encrypt.data.transfer.cipher.suites</name>
- <value>AES/CTR/NoPadding</value>
- <description>
- This value may be either undefined or AES/CTR/NoPadding. If defined, then
- dfs.encrypt.data.transfer uses the specified cipher suite for data encryption.
- If not defined, then only the algorithm specified in dfs.encrypt.data.transfer.algorithm
- is used. By default, the property is not defined.
- </description>
- <on-ambari-upgrade add="true"/>
- </property>
- <property>
- <name>dfs.namenode.inode.attributes.provider.class</name>
- <description>The inode attributes provider class; set when the Ranger HDFS plugin is enabled</description>
- <depends-on>
- <property>
- <type>ranger-hdfs-plugin-properties</type>
- <name>ranger-hdfs-plugin-enabled</name>
- </property>
- </depends-on>
- <value-attributes>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="true"/>
- </property>
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-audit.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-audit.xml
deleted file mode 100644
index fd41817..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-audit.xml
+++ /dev/null
@@ -1,217 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
- <!-- These configs were inherited from HDP 2.3 -->
- <property>
- <name>xasecure.audit.is.enabled</name>
- <value>true</value>
- <description>Is Audit enabled?</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.db</name>
- <value>false</value>
- <display-name>Audit to DB</display-name>
- <description>Is Audit to DB enabled?</description>
- <value-attributes>
- <type>boolean</type>
- </value-attributes>
- <depends-on>
- <property>
- <type>ranger-env</type>
- <name>xasecure.audit.destination.db</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.db.jdbc.url</name>
- <value>{{audit_jdbc_url}}</value>
- <description>Audit DB JDBC URL</description>
- <on-ambari-upgrade add="false"/>
- </property>
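- <!-- Values in double curly braces, such as {{audit_jdbc_url}} above, are template
- parameters that Ambari substitutes at deployment time from the cluster configuration. -->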
- <property>
- <name>xasecure.audit.destination.db.user</name>
- <value>{{xa_audit_db_user}}</value>
- <description>Audit DB JDBC User</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.db.password</name>
- <value>crypted</value>
- <property-type>PASSWORD</property-type>
- <description>Audit DB JDBC Password</description>
- <value-attributes>
- <type>password</type>
- </value-attributes>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.db.jdbc.driver</name>
- <value>{{jdbc_driver}}</value>
- <description>Audit DB JDBC Driver</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.credential.provider.file</name>
- <value>jceks://file{{credential_file}}</value>
- <description>Credential file store</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.db.batch.filespool.dir</name>
- <value>/var/log/hadoop/hdfs/audit/db/spool</value>
- <description>Local directory for spooling DB audit events when the destination is unavailable</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.hdfs</name>
- <value>true</value>
- <display-name>Audit to HDFS</display-name>
- <description>Is Audit to HDFS enabled?</description>
- <value-attributes>
- <type>boolean</type>
- </value-attributes>
- <depends-on>
- <property>
- <type>ranger-env</type>
- <name>xasecure.audit.destination.hdfs</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.hdfs.dir</name>
- <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
- <description>HDFS folder to write audit to; make sure the service user has the required permissions</description>
- <depends-on>
- <property>
- <type>ranger-env</type>
- <name>xasecure.audit.destination.hdfs.dir</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
- <value>/var/log/hadoop/hdfs/audit/hdfs/spool</value>
- <description>Local directory for spooling HDFS audit events when the destination is unavailable</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.solr</name>
- <value>false</value>
- <display-name>Audit to SOLR</display-name>
- <description>Is Solr audit enabled?</description>
- <value-attributes>
- <type>boolean</type>
- </value-attributes>
- <depends-on>
- <property>
- <type>ranger-env</type>
- <name>xasecure.audit.destination.solr</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.solr.urls</name>
- <value/>
- <description>Solr URL</description>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <depends-on>
- <property>
- <type>ranger-admin-site</type>
- <name>ranger.audit.solr.urls</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.solr.zookeepers</name>
- <value>NONE</value>
- <description>Solr Zookeeper string</description>
- <depends-on>
- <property>
- <type>ranger-admin-site</type>
- <name>ranger.audit.solr.zookeepers</name>
- </property>
- </depends-on>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
- <value>/var/log/hadoop/hdfs/audit/solr/spool</value>
- <description>Local directory for spooling Solr audit events when the destination is unavailable</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.provider.summary.enabled</name>
- <value>false</value>
- <display-name>Audit provider summary enabled</display-name>
- <description>Enable Summary audit?</description>
- <value-attributes>
- <type>boolean</type>
- </value-attributes>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <!-- These configs are deleted in HDP 2.5. -->
- <property>
- <name>xasecure.audit.destination.db</name>
- <deleted>true</deleted>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.db.jdbc.url</name>
- <deleted>true</deleted>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.db.user</name>
- <deleted>true</deleted>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.db.password</name>
- <deleted>true</deleted>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.db.jdbc.driver</name>
- <deleted>true</deleted>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.credential.provider.file</name>
- <deleted>true</deleted>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>xasecure.audit.destination.db.batch.filespool.dir</name>
- <deleted>true</deleted>
- <on-ambari-upgrade add="false"/>
- </property>
-
-</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/ab4b864c/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-plugin-properties.xml
deleted file mode 100644
index b31742c..0000000
--- a/ambari-server/src/main/resources/common-services/HDFS/3.0.0/configuration/ranger-hdfs-plugin-properties.xml
+++ /dev/null
@@ -1,98 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
- <!-- These configs were inherited from HDP 2.2 -->
- <property>
- <name>policy_user</name>
- <value>ambari-qa</value>
- <display-name>Policy user for HDFS</display-name>
- <description>This user must be a system user and must also be present in the Ranger
- Admin portal</description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>hadoop.rpc.protection</name>
- <value/>
- <description>Used for repository creation on ranger admin
- </description>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>common.name.for.certificate</name>
- <value/>
- <description>Common name for the certificate; this value should match what is specified in the repository within Ranger Admin</description>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>ranger-hdfs-plugin-enabled</name>
- <value>No</value>
- <display-name>Enable Ranger for HDFS</display-name>
- <description>Enable ranger hdfs plugin</description>
- <depends-on>
- <property>
- <type>ranger-env</type>
- <name>ranger-hdfs-plugin-enabled</name>
- </property>
- </depends-on>
- <value-attributes>
- <type>boolean</type>
- <overridable>false</overridable>
- </value-attributes>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>REPOSITORY_CONFIG_USERNAME</name>
- <value>hadoop</value>
- <display-name>Ranger repository config user</display-name>
- <description>Used for repository creation on ranger admin
- </description>
- <on-ambari-upgrade add="false"/>
- </property>
- <property>
- <name>REPOSITORY_CONFIG_PASSWORD</name>
- <value>hadoop</value>
- <display-name>Ranger repository config password</display-name>
- <property-type>PASSWORD</property-type>
- <description>Used for repository creation on ranger admin
- </description>
- <value-attributes>
- <type>password</type>
- </value-attributes>
- <on-ambari-upgrade add="false"/>
- </property>
-
- <!-- These configs were inherited from HDP 2.5 -->
- <property>
- <name>hadoop.rpc.protection</name>
- <value>authentication</value>
- <description>Used for repository creation on ranger admin</description>
- <value-attributes>
- <empty-value-valid>true</empty-value-valid>
- </value-attributes>
- <on-ambari-upgrade add="false" />
- </property>
-</configuration>