Posted to commits@ambari.apache.org by dm...@apache.org on 2017/01/20 10:19:10 UTC

[08/46] ambari git commit: AMBARI-18739. Perf: Create Rolling and Express Upgrade Packs (dlysnichenko)

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-metrics2.properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-metrics2.properties.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-metrics2.properties.xml
deleted file mode 100644
index 4aadb83..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-metrics2.properties.xml
+++ /dev/null
@@ -1,125 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <!-- hadoop-metrics2.properties -->
-  <property>
-    <name>content</name>
-    <display-name>hadoop-metrics2.properties template</display-name>
-    <description>This is the Jinja template for the hadoop-metrics2.properties file</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html in org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
-supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}
-
-{% if has_metric_collector %}
-
-*.period={{metrics_collection_period}}
-*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-*.sink.timeline.period={{metrics_collection_period}}
-*.sink.timeline.sendInterval={{metrics_report_interval}}000
-*.sink.timeline.slave.host.name={{hostname}}
-*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
-*.sink.timeline.protocol={{metric_collector_protocol}}
-*.sink.timeline.port={{metric_collector_port}}
-
-# HTTPS properties
-*.sink.timeline.truststore.path = {{metric_truststore_path}}
-*.sink.timeline.truststore.type = {{metric_truststore_type}}
-*.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-maptask.sink.timeline.collector.hosts={{ams_collector_hosts}}
-reducetask.sink.timeline.collector.hosts={{ams_collector_hosts}}
-applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-
-resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
-
-{% if is_nn_client_port_configured %}
-# Namenode rpc ports customization
-namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
-{% endif %}
-{% if is_nn_dn_port_configured %}
-namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
-{% endif %}
-{% if is_nn_healthcheck_port_configured %}
-namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
-{% endif %}
-
-{% endif %}
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

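A note on the file just removed: its content value is a Jinja template, not
literal properties text. Ambari renders it with cluster facts such as
has_metric_collector, metrics_collection_period, and ams_collector_hosts
before writing hadoop-metrics2.properties to each host. A minimal sketch of
that rendering step using the jinja2 library directly; the variable values
below are illustrative stand-ins, not what Ambari actually supplies at
runtime:

    import textwrap
    from jinja2 import Template

    # A fragment of the template above, trimmed to the timeline-sink branch.
    fragment = textwrap.dedent("""\
        {% if has_metric_collector %}
        *.period={{metrics_collection_period}}
        *.sink.timeline.sendInterval={{metrics_report_interval}}000
        datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
        {% endif %}""")

    # Illustrative values only; Ambari derives the real ones from the cluster.
    # Note how the template appends '000' to convert seconds to milliseconds.
    print(Template(fragment).render(
        has_metric_collector=True,
        metrics_collection_period=10,
        metrics_report_interval=60,
        ams_collector_hosts="collector-1.example.com",
    ))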
http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 9193bad..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,130 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration supports_final="true">
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status, etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    user mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    example, "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

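All of the ACLs in the deleted hadoop-policy.xml share one format, spelled
out in each description: a comma-separated user list and a comma-separated
group list separated by a single blank, with "*" meaning all users. A rough
Python sketch of that check, illustrating only the documented format (this
is not Hadoop's actual AccessControlList implementation):

    def acl_allows(acl, user, user_groups):
        """Evaluate an ACL string such as 'alice,bob users,wheel' or '*'."""
        if acl == "*":
            return True
        users_part, _, groups_part = acl.partition(" ")
        users = {u for u in users_part.split(",") if u}
        groups = {g for g in groups_part.split(",") if g}
        return user in users or bool(groups & set(user_groups))

    assert acl_allows("*", "anyone", [])
    assert acl_allows("alice,bob users,wheel", "carol", ["wheel"])
    assert not acl_allows("hadoop", "carol", ["users"])  # 'hadoop' names a user here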
http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-alert-config.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-alert-config.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-alert-config.xml
deleted file mode 100644
index 392eea7..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-alert-config.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-        http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
--->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
-
-    <property>
-        <name>alert.behavior.type</name>
-        <value>percentage</value>
-        <description>
-            This property describes the type of alert behavior.
-            There are three types: percentage, timeout, flip.
-        </description>
-        <on-ambari-upgrade add="false"/>
-    </property>
-
-
-    <property>
-        <name>alert.success.percentage</name>
-        <value>100</value>
-        <description>
-            This property takes effect only when alert.behavior.type is
-            set to "percentage". It sets the percentage of alert checks
-            that succeed.
-        </description>
-        <on-ambari-upgrade add="false"/>
-    </property>
-
-
-    <property>
-        <name>alert.timeout.return.value</name>
-        <value>false</value>
-        <description>
-            This property takes effect only when alert.behavior.type is
-            set to "timeout". It sets the result the alert returns after
-            the timeout: false|true|none.
-        </description>
-        <on-ambari-upgrade add="false"/>
-    </property>
-
-    <property>
-        <name>alert.timeout.secs</name>
-        <value>120</value>
-        <description>
-            This property takes effect only when alert.behavior.type is
-            set to "timeout". It sets the number of seconds the alert
-            sleeps.
-        </description>
-        <on-ambari-upgrade add="false"/>
-    </property>
-
-
-    <property>
-        <name>alert.flip.interval.mins</name>
-        <value>3</value>
-        <description>
-            This property takes effect only when alert.behavior.type is
-            set to "flip". It sets the interval, in minutes, at which
-            the alert flips between true and false.
-        </description>
-        <on-ambari-upgrade add="false"/>
-    </property>
-
-
-</configuration>
\ No newline at end of file

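hdfs-alert-config.xml drives the PERF stack's simulated alerts: "percentage"
makes a fixed share of checks succeed, "timeout" sleeps and then returns a
fixed result, and "flip" toggles the alert state on a fixed interval. A
hypothetical sketch of how an alert script could interpret these settings
(not the PERF stack's actual alert code):

    import random
    import time

    def dummy_alert_ok(config):
        """Return True for OK, False for CRITICAL, per alert.behavior.type."""
        behavior = config["alert.behavior.type"]
        if behavior == "percentage":
            # Succeed roughly alert.success.percentage percent of the time.
            return random.uniform(0, 100) < float(config["alert.success.percentage"])
        if behavior == "timeout":
            time.sleep(float(config["alert.timeout.secs"]))
            return config["alert.timeout.return.value"] == "true"
        if behavior == "flip":
            # Toggle between OK and CRITICAL every alert.flip.interval.mins minutes.
            interval = float(config["alert.flip.interval.mins"])
            return int(time.time() / 60 / interval) % 2 == 0
        raise ValueError("unknown alert.behavior.type: %s" % behavior)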
http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-log4j.xml
deleted file mode 100644
index a4eacc2..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-log4j.xml
+++ /dev/null
@@ -1,225 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>content</name>
-    <display-name>hdfs-log4j template</display-name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-# Define some default values that can be overridden by system properties
-# To change daemon root logger use hadoop_root_logger in hadoop-env
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-#Security audit appender
-#
-hadoop.security.logger=INFO,console
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# NameNode metrics logging.
-# The default is to retain two namenode-metrics.log files up to 64MB each.
-#
-namenode.metrics.logger=INFO,NullAppender
-log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
-log4j.additivity.NameNodeMetricsLog=false
-log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
-log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
-log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
-log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
-log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and number of backups
-log4j.appender.RFA.MaxFileSize=256MB
-log4j.appender.RFA.MaxBackupIndex=10
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-# Simple pattern, superseded by the detailed pattern below
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-# Removes "deprecated" messages
-log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
-
-#
-# HDFS block state change log from block manager
-#
-# Uncomment the following to suppress normal block state change
-# messages from BlockManager in NameNode.
-#log4j.logger.BlockStateChange=WARN
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

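The hdfs-log4j template is plain log4j 1.x configuration; the recurring
conversion pattern %d{ISO8601} %p %c: %m%n produces lines like
"2017-01-20 10:19:10,123 INFO org.apache.hadoop.hdfs.StateChange: ...".
For comparison only, the same layout expressed with Python's logging module
(an analogy, not part of the stack):

    import logging

    # Python analogue of log4j's '%d{ISO8601} %p %c: %m%n'; logging's default
    # asctime format already matches ISO8601 with millisecond precision.
    logging.basicConfig(
        format="%(asctime)s %(levelname)s %(name)s: %(message)s",
        level=logging.INFO,
    )
    logging.getLogger("org.apache.hadoop.hdfs.StateChange").info("Safe mode is OFF")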
http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-logsearch-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-logsearch-conf.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-logsearch-conf.xml
deleted file mode 100644
index 6540c86..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-logsearch-conf.xml
+++ /dev/null
@@ -1,248 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>service_name</name>
-    <display-name>Service name</display-name>
-    <description>Service name for Logsearch Portal (label)</description>
-    <value>HDFS</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>component_mappings</name>
-    <display-name>Component mapping</display-name>
-    <description>Logsearch component logid mapping list (e.g.: COMPONENT1:logid1,logid2;COMPONENT2:logid3)</description>
-    <value>NAMENODE:hdfs_namenode;DATANODE:hdfs_datanode;SECONDARY_NAMENODE:hdfs_secondarynamenode;JOURNALNODE:hdfs_journalnode;ZKFC:hdfs_zkfc;NFS_GATEWAY:hdfs_nfs3</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Logfeeder Config</display-name>
-    <description>Metadata Jinja template for Logfeeder, containing the grok patterns used to read service-specific logs.</description>
-    <value>
-{
-  "input":[
-    {
-      "type":"hdfs_datanode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-datanode-*.log"
-    },
-    {
-      "type":"hdfs_namenode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-namenode-*.log"
-    },
-    {
-      "type":"hdfs_journalnode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-journalnode-*.log"
-    },
-    {
-      "type":"hdfs_secondarynamenode",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-secondarynamenode-*.log"
-    },
-    {
-      "type":"hdfs_zkfc",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-zkfc-*.log"
-    },
-    {
-      "type":"hdfs_nfs3",
-      "rowtype":"service",
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hadoop-{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}-nfs3-*.log"
-    },
-    {
-      "type":"hdfs_audit",
-      "rowtype":"audit",
-      "is_enabled":"true",
-      "add_fields":{
-        "logType":"HDFSAudit",
-        "enforcer":"hadoop-acl",
-        "repoType":"1",
-        "repo":"hdfs"
-      },
-      "path":"{{default('/configurations/hadoop-env/hdfs_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/hadoop-env/hdfs_user', 'hdfs')}}/hdfs-audit.log"
-    }
-   ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_datanode",
-            "hdfs_journalnode",
-            "hdfs_secondarynamenode",
-            "hdfs_namenode",
-            "hdfs_zkfc",
-            "hdfs_nfs3"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-        }
-       }
-     },
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_audit"
-          ]
-         }
-       },
-      "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:evtTime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:evtTime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}:%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "evtTime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-         }
-       }
-     },
-    {
-      "filter":"keyvalue",
-      "sort_order":1,
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_audit"
-          ]
-         }
-       },
-      "source_field":"log_message",
-      "value_split":"=",
-      "field_split":"\t",
-      "post_map_values":{
-        "src":{
-          "map_fieldname":{
-            "new_fieldname":"resource"
-          }
-         },
-        "ip":{
-          "map_fieldname":{
-            "new_fieldname":"cliIP"
-          }
-         },
-        "allowed":[
-          {
-            "map_fieldvalue":{
-              "pre_value":"true",
-              "post_value":"1"
-            }
-           },
-          {
-            "map_fieldvalue":{
-              "pre_value":"false",
-              "post_value":"0"
-            }
-           },
-          {
-            "map_fieldname":{
-              "new_fieldname":"result"
-            }
-           }
-         ],
-        "cmd":{
-          "map_fieldname":{
-            "new_fieldname":"action"
-          }
-         },
-        "proto":{
-          "map_fieldname":{
-            "new_fieldname":"cliType"
-          }
-         },
-        "callerContext":{
-          "map_fieldname":{
-            "new_fieldname":"req_caller_id"
-          }
-         }
-       }
-     },
-    {
-      "filter":"grok",
-      "sort_order":2,
-      "source_field":"ugi",
-      "remove_source_field":"false",
-      "conditions":{
-        "fields":{
-          "type":[
-            "hdfs_audit"
-          ]
-         }
-       },
-      "message_pattern":"%{USERNAME:p_user}.+auth:%{USERNAME:p_authType}.+via %{USERNAME:k_user}.+auth:%{USERNAME:k_authType}|%{USERNAME:user}.+auth:%{USERNAME:authType}|%{USERNAME:x_user}",
-      "post_map_values":{
-        "user":{
-          "map_fieldname":{
-            "new_fieldname":"reqUser"
-          }
-         },
-        "x_user":{
-          "map_fieldname":{
-            "new_fieldname":"reqUser"
-          }
-         },
-        "p_user":{
-          "map_fieldname":{
-            "new_fieldname":"reqUser"
-          }
-         },
-        "k_user":{
-          "map_fieldname":{
-            "new_fieldname":"proxyUsers"
-          }
-         },
-        "p_authType":{
-          "map_fieldname":{
-            "new_fieldname":"authType"
-          }
-         },
-        "k_authType":{
-          "map_fieldname":{
-            "new_fieldname":"proxyAuthType"
-          }
-         }
-       }
-     }
-   ]
- }
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

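Each input in the Logfeeder descriptor above is paired with a grok
message_pattern; names such as %{TIMESTAMP_ISO8601:logtime} expand into
anchored regular expressions with named capture groups. A simplified Python
equivalent of the service-log pattern, with the grok primitives hand-expanded
into much looser stand-in regexes:

    import re

    SERVICE_LINE = re.compile(
        r"^(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\s+"
        r"(?P<level>[A-Z]+)\s+(?P<logger_name>[\w.$]+)\s+"
        r"\((?P<file>[\w.]+):(?P<method>\w+)\((?P<line_number>\d+)\)\)\s+-\s+"
        r"(?P<log_message>.*)$"
    )

    line = ("2017-01-20 10:19:10,123 INFO datanode.DataNode "
            "(DataXceiver.java:run(281)) - Receiving block")
    match = SERVICE_LINE.match(line)
    assert match and match.group("level") == "INFO"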
http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 995632f..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,633 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put site-specific property overrides in this file. -->
-<configuration supports_final="true">
-  <!-- These properties exist in common services. -->
-  <!-- file system properties -->
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <display-name>NameNode directories</display-name>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-    <value-attributes>
-      <type>directories</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>Enables DFS append.</description>
-    <final>true</final>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <display-name>WebHDFS enabled</display-name>
-    <description>Whether to enable WebHDFS feature</description>
-    <final>true</final>
-    <value-attributes>
-      <type>boolean</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
-    <final>true</final>
-    <display-name>DataNode failed disk tolerance</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>2</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hdfs-site</type>
-        <name>dfs.datanode.data.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <display-name>DataNode directories</display-name>
-    <description>Determines where on the local filesystem a DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-    <value-attributes>
-      <type>directories</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <!--
-    <property>
-      <name>dfs.hosts</name>
-      <value>/etc/hadoop/conf/dfs.include</value>
-      <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-    </property>
-  -->
-  <property>
-    <name>dfs.namenode.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <display-name>SecondaryNameNode Checkpoint directories</display-name>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-    </description>
-    <value-attributes>
-      <type>directories</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>${dfs.namenode.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      Default value is same as dfs.namenode.checkpoint.dir
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value>21600</value>
-    <display-name>HDFS Maximum Checkpoint Delay</display-name>
-    <description>The number of seconds between two periodic checkpoints.</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>seconds</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.checkpoint.txns</name>
-    <value>1000000</value>
-    <description>The Secondary NameNode or CheckpointNode will create a checkpoint
-      of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
-      regardless of whether 'dfs.namenode.checkpoint.period' has expired.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <display-name>Block replication</display-name>
-    <description>Default block replication.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>0.999</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.namenode.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-    <display-name>Minimum replicated blocks %</display-name>
-    <value-attributes>
-      <type>float</type>
-      <minimum>0.990</minimum>
-      <maximum>1.000</maximum>
-      <increment-step>0.001</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for balancing purposes, in terms of
-      the number of bytes per second.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-    <description>
-      The datanode server address and port for data transfer.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-    <description>
-      The datanode http server address and port.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.datanode.https.address</name>
-    <value>0.0.0.0:50475</value>
-    <description>
-      The datanode https server address and port.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address</name>
-    <value>localhost:50070</value>
-    <description>The address and base port on which the dfs namenode
-      web UI will listen.</description>
-    <final>true</final>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address</name>
-    <value>localhost:8020</value>
-    <description>RPC address that handles all clients requests.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <display-name>Reserved space for HDFS</display-name>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-    <value-attributes>
-      <type>int</type>
-      <unit>bytes</unit>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hdfs-site</type>
-        <name>dfs.datanode.data.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>1024</value>
-    <description>Specifies the maximum number of threads to use for transferring data in and out of the datanode.</description>
-    <display-name>DataNode max data transfer threads</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>48000</maximum>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <!-- Permissions configuration -->
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>The number of NameNode server threads. A larger queue allows more concurrent client connections.</description>
-    <display-name>NameNode Server threads</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>1</minimum>
-      <maximum>200</maximum>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.namenode.secondary.http-address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address</name>
-    <value>localhost:50470</value>
-    <description>The https address where namenode binds</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <display-name>DataNode directories permission</display-name>
-    <description>The permissions that should be there on dfs.datanode.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.datanode.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-    <display-name>Access time precision</display-name>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL of users and groups that can view the default servlets in HDFS</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes is greater
-      than this ratio, stop avoiding writing to stale nodes, so as to prevent hotspots.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.journalnode.http-address</name>
-    <value>0.0.0.0:8480</value>
-    <description>The address and port the JournalNode web UI listens on.
-      If the port is 0 then the server will start on a free port. </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.journalnode.https-address</name>
-    <value>0.0.0.0:8481</value>
-    <description>The address and port the JournalNode HTTPS server listens on.
-      If the port is 0 then the server will start on a free port. </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <!-- HDFS Short-Circuit Local Reads -->
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <display-name>HDFS Short-circuit read</display-name>
-    <description>
-      This configuration parameter turns on short-circuit local reads.
-    </description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <description>
-      This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
-      If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
-    <value>4096</value>
-    <description>
-      The DFSClient maintains a cache of recently opened file descriptors. This
-      parameter controls the size of that cache. Setting this higher will use
-      more file descriptors, but potentially provide better performance on
-      workloads involving lots of seeks.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.name.dir.restore</name>
-    <value>true</value>
-    <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
-      When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.http.policy</name>
-    <value>HTTP_ONLY</value>
-    <description>
-      Decides whether HTTPS (SSL) is supported on HDFS. This configures the HTTP endpoint for HDFS daemons.
-      The following values are supported: HTTP_ONLY (service is provided only on http), HTTPS_ONLY
-      (service is provided only on https), HTTP_AND_HTTPS (service is provided on both).
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These properties exist in HDP 2.1 and higher. -->
-  <property>
-    <name>dfs.namenode.audit.log.async</name>
-    <value>true</value>
-    <description>Whether to enable async auditlog</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.fslock.fair</name>
-    <value>false</value>
-    <description>Whether fsLock is fair</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These properties exist in HDP 2.2 and higher. -->
-  <property>
-    <name>dfs.namenode.startup.delay.block.deletion.sec</name>
-    <value>3600</value>
-    <description>
-      The delay in seconds before block deletion starts after NameNode startup.
-      By default it is disabled. When a directory containing a large number of
-      directories and files is deleted, a delay of one hour is suggested, to give
-      the administrator enough time to notice the large number of pending
-      deletion blocks and take corrective action.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.journalnode.edits.dir</name>
-    <value>/hadoop/hdfs/journalnode</value>
-    <description>The path where the JournalNode daemon will store its local state. </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.client.retry.policy.enabled</name>
-    <value>false</value>
-    <description>Enables HDFS client retry in the event of a NameNode failure.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.content-summary.limit</name>
-    <value>5000</value>
-    <description>Dfs content summary limit.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.encryption.key.provider.uri</name>
-    <description>
-      The KeyProvider to use when interacting with encryption keys used
-      when reading and writing to an encryption zone.
-    </description>
-    <value/>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hadoop-env</type>
-        <name>keyserver_host</name>
-      </property>
-      <property>
-        <type>hadoop-env</type>
-        <name>keyserver_port</name>
-      </property>
-      <property>
-        <type>kms-env</type>
-        <name>kms_port</name>
-      </property>
-      <property>
-        <type>ranger-kms-site</type>
-        <name>ranger.service.https.attrib.ssl.enabled</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These properties exist in HDP 2.3 and higher. -->
-  <property>
-    <name>nfs.file.dump.dir</name>
-    <value>/tmp/.hdfs-nfs</value>
-    <display-name>NFSGateway dump directory</display-name>
-    <description>
-      This directory is used to temporarily save out-of-order writes before
-      writing to HDFS. For each file, the out-of-order writes are dumped after
-      they accumulate in memory beyond a certain threshold (e.g., 1 MB).
-      Make sure the directory has enough space.
-    </description>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>nfs.exports.allowed.hosts</name>
-    <value>* rw</value>
-    <description>
-      By default, the export can be mounted by any client. To better control access,
-      users can update this property. The value string contains a machine name and an
-      access privilege, separated by whitespace. The machine name can be a single host, a
-      wildcard, or an IPv4 network. The access privilege uses rw or ro to grant the machines
-      read-write or read-only access to the exports; if no access privilege is provided, the
-      default is read-only. Entries are separated by &quot;;&quot;. For example: &quot;192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;&quot;.
-    </description>
-    <display-name>Allowed hosts</display-name>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.encrypt.data.transfer.cipher.suites</name>
-    <value>AES/CTR/NoPadding</value>
-    <description>
-      This value may be either undefined or AES/CTR/NoPadding. If defined, then 
-      dfs.encrypt.data.transfer uses the specified cipher suite for data encryption. 
-      If not defined, then only the algorithm specified in dfs.encrypt.data.transfer.algorithm 
-      is used. By default, the property is not defined.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>dfs.namenode.inode.attributes.provider.class</name>
-    <description>Attribute/authorization provider class for the NameNode; set to the Ranger authorizer when the Ranger HDFS plugin is enabled</description>
-    <depends-on>
-      <property>
-        <type>ranger-hdfs-plugin-properties</type>
-        <name>ranger-hdfs-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
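
A concrete illustration of the two most format-sensitive properties above: a rendered hdfs-site.xml that enforces HTTPS and restricts the NFS export could look as follows (the /22 network and hostnames are illustrative, lifted from the example in the nfs.exports.allowed.hosts description):

  <property>
    <name>dfs.http.policy</name>
    <value>HTTPS_ONLY</value>
  </property>
  <property>
    <name>nfs.exports.allowed.hosts</name>
    <!-- ";"-separated entries: an IPv4 network with read-write access, a wildcard
         host pattern (read-only by default), and a single read-only host -->
    <value>192.168.0.0/22 rw ; host*.example.com ; host1.test.org ro;</value>
  </property>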

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-audit.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-audit.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-audit.xml
deleted file mode 100644
index 3dc46b3..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-audit.xml
+++ /dev/null
@@ -1,124 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <!-- These properties exist in HDP 2.3 and higher. -->
-  <property>
-    <name>xasecure.audit.is.enabled</name>
-    <value>true</value>
-    <description>Is Audit enabled?</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs</name>
-    <value>true</value>
-    <display-name>Audit to HDFS</display-name>
-    <description>Is Audit to HDFS enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.dir</name>
-    <value>hdfs://NAMENODE_HOSTNAME:8020/ranger/audit</value>
-    <description>HDFS folder to write audit logs to; make sure the service user has the required permissions</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.hdfs.dir</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.hdfs.batch.filespool.dir</name>
-    <value>/var/log/hadoop/hdfs/audit/hdfs/spool</value>
-    <description>Local spool directory for HDFS audit logs</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr</name>
-    <value>false</value>
-    <display-name>Audit to SOLR</display-name>
-    <description>Is Solr audit enabled?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>xasecure.audit.destination.solr</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.urls</name>
-    <value/>
-    <description>Solr URL</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.urls</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.zookeepers</name>
-    <value>NONE</value>
-    <description>Solr ZooKeeper connect string</description>
-    <depends-on>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.audit.solr.zookeepers</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.destination.solr.batch.filespool.dir</name>
-    <value>/var/log/hadoop/hdfs/audit/solr/spool</value>
-    <description>Local spool directory for Solr audit logs</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.audit.provider.summary.enabled</name>
-    <value>false</value>
-    <display-name>Audit provider summary enabled</display-name>
-    <description>Enable Summary audit?</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-</configuration>
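
Once the NAMENODE_HOSTNAME placeholder is substituted at deploy time, the HDFS audit destination in the rendered ranger-hdfs-audit.xml comes out roughly as below (nn1.example.com is an assumed NameNode host; port 8020 is carried over from the template value):

  <property>
    <name>xasecure.audit.destination.hdfs</name>
    <value>true</value>
  </property>
  <property>
    <name>xasecure.audit.destination.hdfs.dir</name>
    <!-- assumed NameNode host; the 8020 RPC port comes from the template default -->
    <value>hdfs://nn1.example.com:8020/ranger/audit</value>
  </property>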

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
deleted file mode 100644
index deede1c..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-plugin-properties.xml
+++ /dev/null
@@ -1,88 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <!-- These properties exist in HDP 2.2 and higher. -->
-  <property>
-    <name>policy_user</name>
-    <value>ambari-qa</value>
-    <display-name>Policy user for HDFS</display-name>
-    <description>This user must be a system user and must also be present in the
-      Ranger Admin portal</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>common.name.for.certificate</name>
-    <value/>
-    <description>Common name for the certificate; this value should match what is specified in the repository within Ranger Admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger-hdfs-plugin-enabled</name>
-    <value>No</value>
-    <display-name>Enable Ranger for HDFS</display-name>
-    <description>Enable ranger hdfs plugin</description>
-    <depends-on>
-      <property>
-        <type>ranger-env</type>
-        <name>ranger-hdfs-plugin-enabled</name>
-      </property>
-    </depends-on>
-    <value-attributes>
-      <type>boolean</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_USERNAME</name>
-    <value>hadoop</value>
-    <display-name>Ranger repository config user</display-name>
-    <description>Used for repository creation on Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>REPOSITORY_CONFIG_PASSWORD</name>
-    <value>hadoop</value>
-    <display-name>Ranger repository config password</display-name>
-    <property-type>PASSWORD</property-type>
-    <description>Used for repository creation on Ranger Admin</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- These properties exist in HDP 2.5 and higher. -->
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value>authentication</value>
-    <description>Used for repository creation on Ranger Admin</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false" />
-  </property>
-</configuration>
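
Note how this file ties back to hdfs-site.xml above: switching ranger-hdfs-plugin-enabled to Yes is what, via the depends-on declared on dfs.namenode.inode.attributes.provider.class, wires the Ranger authorizer into the NameNode. A sketch of the resulting pair of settings (the provider class shown is the one the Ranger plugin ships, not a value pinned by this stack):

  <property>
    <name>ranger-hdfs-plugin-enabled</name>
    <value>Yes</value>
  </property>
  <!-- which in hdfs-site.xml is typically reflected as: -->
  <property>
    <name>dfs.namenode.inode.attributes.provider.class</name>
    <value>org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer</value>
  </property>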

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
deleted file mode 100644
index 081ec2d..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-policymgr-ssl.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <!-- These properties exist in HDP 2.3 and higher. -->
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore</name>
-    <value>/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks</value>
-    <description>Java keystore file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.password</name>
-    <value>myKeyFilePassword</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password for the keystore</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore</name>
-    <value>/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks</value>
-    <description>Java truststore file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.password</name>
-    <value>changeit</value>
-    <property-type>PASSWORD</property-type>
-    <description>Java truststore password</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>Java keystore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.policymgr.clientssl.truststore.credential.file</name>
-    <value>jceks://file{{credential_file}}</value>
-    <description>Java truststore credential file</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
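
The two credential-file values above are jceks URIs formed by prepending jceks://file to the absolute path held in the {{credential_file}} Jinja variable. Assuming that variable rendered to /etc/ranger/hdfs/cred.jceks (a hypothetical path), the property would come out as:

  <property>
    <name>xasecure.policymgr.clientssl.keystore.credential.file</name>
    <!-- jceks://file + absolute path of the credential store; the path is illustrative -->
    <value>jceks://file/etc/ranger/hdfs/cred.jceks</value>
  </property>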

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-security.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-security.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-security.xml
deleted file mode 100644
index ef7fd4f..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ranger-hdfs-security.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<?xml version="1.0"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <!-- These properties exist in HDP 2.3 and higher. -->
-  <property>
-    <name>ranger.plugin.hdfs.service.name</name>
-    <value>{{repo_name}}</value>
-    <description>Name of the Ranger service containing HDFS policies</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.source.impl</name>
-    <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
-    <description>Class to retrieve policies from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.rest.url</name>
-    <value>{{policymgr_mgr_url}}</value>
-    <description>URL to Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.rest.ssl.config.file</name>
-    <value>/etc/hadoop/conf/ranger-policymgr-ssl.xml</value>
-    <description>Path to the file containing SSL details to contact Ranger Admin</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.pollIntervalMs</name>
-    <value>30000</value>
-    <description>How often, in milliseconds, to poll for policy changes</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ranger.plugin.hdfs.policy.cache.dir</name>
-    <value>/etc/ranger/{{repo_name}}/policycache</value>
-    <description>Directory where Ranger policies are cached after successful retrieval from the source</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>xasecure.add-hadoop-authorization</name>
-    <value>true</value>
-    <description>Enable/disable default Hadoop authorization (based on the rwxrwxrwx permissions on the resource) if Ranger authorization fails.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
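
Several values in this file are Jinja placeholders resolved at deploy time: {{repo_name}} becomes the Ranger service (repository) name and {{policymgr_mgr_url}} the Ranger Admin URL. Assuming a repository named perf_hadoop and Ranger Admin at rangeradmin.example.com:6080 (both hypothetical), the rendered file would read roughly:

  <property>
    <name>ranger.plugin.hdfs.service.name</name>
    <value>perf_hadoop</value>
  </property>
  <property>
    <name>ranger.plugin.hdfs.policy.rest.url</name>
    <value>http://rangeradmin.example.com:6080</value>
  </property>
  <property>
    <name>ranger.plugin.hdfs.policy.cache.dir</name>
    <value>/etc/ranger/perf_hadoop/policycache</value>
  </property>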

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ssl-client.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ssl-client.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ssl-client.xml
deleted file mode 100644
index 7e0f265..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ssl-client.xml
+++ /dev/null
@@ -1,70 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-  <property>
-    <name>ssl.client.truststore.location</name>
-    <value>/etc/security/clientKeys/all.jks</value>
-    <description>Location of the trust store file.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.client.truststore.type</name>
-    <value>jks</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.client.truststore.password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the trust store file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.client.truststore.reload.interval</name>
-    <value>10000</value>
-    <description>Truststore reload interval, in milliseconds.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.client.keystore.type</name>
-    <value>jks</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.client.keystore.location</name>
-    <value>/etc/security/clientKeys/keystore.jks</value>
-    <description>Location of the keystore file.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.client.keystore.password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the keystore file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
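
Since ssl.client.truststore.type is documented as optional with a jks default, a site that keeps its client trust material in PKCS12 stores would only need to override the location and the type. A minimal sketch (the .p12 path and type are illustrative, not stack defaults):

  <property>
    <name>ssl.client.truststore.location</name>
    <value>/etc/security/clientKeys/truststore.p12</value>
  </property>
  <property>
    <name>ssl.client.truststore.type</name>
    <value>pkcs12</value>
  </property>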

http://git-wip-us.apache.org/repos/asf/ambari/blob/984d4605/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ssl-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ssl-server.xml b/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ssl-server.xml
deleted file mode 100644
index 2177cef..0000000
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/services/HDFS/configuration/ssl-server.xml
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-  <property>
-    <name>ssl.server.truststore.location</name>
-    <value>/etc/security/serverKeys/all.jks</value>
-    <description>Location of the trust store file.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.server.truststore.type</name>
-    <value>jks</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.server.truststore.password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the trust store file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.server.truststore.reload.interval</name>
-    <value>10000</value>
-    <description>Truststore reload interval, in milliseconds.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.server.keystore.type</name>
-    <value>jks</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.server.keystore.location</name>
-    <value>/etc/security/serverKeys/keystore.jks</value>
-    <description>Location of the keystore file.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.server.keystore.password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the keystore file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>ssl.server.keystore.keypassword</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <description>Password for private key in keystore file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
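
ssl-server.xml is the server-side counterpart of ssl-client.xml above: HDFS daemons consult it when dfs.http.policy (from hdfs-site.xml earlier in this patch) is HTTPS_ONLY or HTTP_AND_HTTPS. The detail worth calling out is that the keystore carries two passwords, one for the file and one for the private key inside it; a sketch with obviously non-default, illustrative values:

  <property>
    <name>ssl.server.keystore.password</name>
    <!-- opens the keystore file itself -->
    <value>storePassw0rd</value>
  </property>
  <property>
    <name>ssl.server.keystore.keypassword</name>
    <!-- unlocks the private key stored in that file -->
    <value>keyPassw0rd</value>
  </property>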